%scala
import scala.sys.process._
// Imports a published Google Sheets CSV into a Databricks table:
// download locally with wget, stage on DBFS, then register via saveAsTable.

// Choose a name for your resulting table in Databricks
val tableName = "user_mapping"

// Replace this URL with the one from your Google Spreadsheets
// Click on File --> Publish to the Web --> Option CSV and copy the URL
val url = "https://docs.google.com/spreadsheets/d/e/2PACX-1vRkLRx_GDKr1AN0hdhPlqkfwEZXPLAyRz9j-t9oRcsYdusI0Id4zD8TX9PbsSTnTsirDQDK3sfHwpGu/pub?output=csv"

val localpath = s"/tmp/$tableName.csv"
// Explicit destination FILE path on DBFS (not just the directory) so the
// spark.read below is guaranteed to find it at this exact location.
val dbfsPath = s"dbfs:/datasets/gsheets/$tableName.csv"

// Remove any stale local copy left over from a previous run.
dbutils.fs.rm(s"file:$localpath")

// Download the published CSV. Seq-based process invocation passes each
// argument verbatim — no shell word-splitting if the URL ever contains
// spaces or shell metacharacters (the string+postfix-!! form would break).
Seq("wget", "-O", localpath, url).!!

// Stage the downloaded file on DBFS.
dbutils.fs.mkdirs("dbfs:/datasets/gsheets")
dbutils.fs.cp(s"file:$localpath", dbfsPath)

// Recreate the table from scratch; spark.sql supersedes the deprecated
// sqlContext entry point (the script already uses `spark` for reading).
spark.sql(s"drop table if exists $tableName")

val df = spark.read
  .option("header", "true")
  .option("inferSchema", "true") // NOTE(review): inferSchema triggers an extra pass over the data
  .csv(s"/datasets/gsheets/$tableName.csv")
df.write.saveAsTable(tableName)