# Placeholders: fill in the target schema, table, and column(s) to drop.
schema = '<schema>'
table = '<table>'
columnsToDrop = ["<column or columns to drop>"]

# The table is re-registered as Delta below; {table} is appended to save_path directly,
# so include a trailing "/" in the path.
write_format = "delta"
save_path = '<path to save the table data>'
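
# Inspect the current schema before dropping any columns.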
display(spark.sql(f"DESCRIBE TABLE {schema}.{table}"))
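
# Load the table into a DataFrame and drop the unwanted columns.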
df = spark.sql(f"SELECT * FROM {schema}.{table}")
df1 = df.drop(*columnsToDrop)
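
# Remove the existing table definition so it can be re-created without those columns.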
spark.sql(f"DROP TABLE if exists {schema}.{table}")
(df1.write
    .format(write_format)
    .mode('overwrite')
    .option("overwriteSchema", "true")
    .save(f"{save_path}{table}"))
spark.sql(f"CREATE TABLE {schema}.{table} USING DELTA LOCATION '{save_path}{table}'")
display(spark.sql(f"DESCRIBE TABLE {schema}.{table}"))