Add new columns to a DataFrame
from pyspark.sql.functions import col, lit

# File location and type
file_location = "/FileStore/tables/sales_data_part1.csv"
file_type = "csv"

# CSV options
infer_schema = "false"
first_row_is_header = "true"
delimiter = ","

# The applied options are for CSV files. For other file types, these will
# be ignored.
df = spark.read.format(file_type) \
    .option("inferSchema", infer_schema) \
    .option("header", first_row_is_header) \
    .option("sep", delimiter) \
    .load(file_location)

display(df)

# Adding new columns with and without default values.
#…
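The remainder of the snippet is truncated, so here is a minimal sketch of how new columns are typically added with withColumn: lit() supplies a constant default value, while col() derives a column from an existing one. The column names status and discounted_price, the existing price column, and the 10% discount are illustrative assumptions, not values taken from the original dataset.

# Minimal sketch (column names and values are assumptions for illustration).
# "status" gets a constant default via lit(); "discounted_price" is derived
# from an assumed existing "price" column via col().
df_with_defaults = df.withColumn("status", lit("active")) \
    .withColumn("discounted_price", col("price").cast("double") * 0.9)

display(df_with_defaults)

Because inferSchema is set to "false" above, every column is read as a string, which is why the sketch casts price to double before applying the arithmetic.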