= glueContext.create_dynamic_frame.from_catalog(
    database="sample_partition_100_db",
    table_name="partition100_date_csv")

## Transform Type
df = ApplyMapping.apply(
    frame=df,
    mappings=[
        ("id", "string", "id", "string"),
        ("uuid", "string", "uuid", "string"),
        ("target_date", "string", "target_date", "date")
    ]).toDF()

## Store tmp table
df.createOrReplaceTempView("tmp_partition100_date_csv")

## Merge
spark.sql('''
MERGE INTO glue_catalog.sample_partition_100_db.partition100_date_iceberg t
USING tmp_partition100_date_csv s
ON (t.id = s.id)
WHEN MATCHED THEN UPDATE SET t.uuid = s.uuid, t.target_date = s.target_date
WHEN NOT MATCHED THEN INSERT *
''')

job.commit()
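The script above assumes that a GlueContext, a SparkSession, and a Glue Job have already been initialized, and that the Spark session is configured with the Iceberg SQL extensions and a glue_catalog catalog backed by the AWS Glue Data Catalog (otherwise the MERGE INTO statement cannot resolve the Iceberg table). Below is a minimal sketch of that setup; the warehouse path is a placeholder, and setting the Iceberg properties through SparkConf in code (rather than through job parameters such as --conf or --datalake-formats) is an assumption, not part of the original script.

## Job setup (minimal sketch; adjust the warehouse path to your environment)
import sys
from awsglue.transforms import ApplyMapping
from awsglue.utils import getResolvedOptions
from awsglue.context import GlueContext
from awsglue.job import Job
from pyspark.context import SparkContext
from pyspark.conf import SparkConf

conf = SparkConf()
## Enable Iceberg SQL extensions and register "glue_catalog" as an Iceberg catalog
## backed by the AWS Glue Data Catalog, with data files on Amazon S3
conf.set("spark.sql.extensions", "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions")
conf.set("spark.sql.catalog.glue_catalog", "org.apache.iceberg.spark.SparkCatalog")
conf.set("spark.sql.catalog.glue_catalog.catalog-impl", "org.apache.iceberg.aws.glue.GlueCatalog")
conf.set("spark.sql.catalog.glue_catalog.io-impl", "org.apache.iceberg.aws.s3.S3FileIO")
conf.set("spark.sql.catalog.glue_catalog.warehouse", "s3://<your-bucket>/<warehouse-prefix>/")  ## placeholder

args = getResolvedOptions(sys.argv, ["JOB_NAME"])
sc = SparkContext(conf=conf)
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args["JOB_NAME"], args)

With this initialization in place, the read, ApplyMapping transform, temporary view, MERGE INTO upsert, and job.commit() shown above run as a single Glue job script.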