Saturday, July 25, 2020

Azure Databricks - Spark Dataframe SELECT cheat sheet for SQL developers

// import necessary spark sql libraries
import org.apache.spark.sql.functions._

// The retail sales dataset from Kaggle has been uploaded to the Databricks file system (DBFS)
// The dataset was taken from https://www.kaggle.com/manjeetsingh/retaildataset
// sales_data_set.csv
// datasets_2296_3883_stores_data_set.csv
dbutils.fs.ls("/FileStore/files/kaggle")

// input file paths and format
val inputFileSales = "dbfs:/FileStore/files/kaggle/sales_data_set.csv"
val inputFileStores = "dbfs:/FileStore/files/kaggle/datasets_2296_3883_stores_data_set.csv"
val fileFormat = "csv"

// load the sales CSV file into a dataframe, letting Spark infer the schema
val df = spark
          .read
          .format(fileFormat)
          .option("header", true)
          .option("inferSchema", "true")
          .load(inputFileSales)

// load the stores CSV file into a dataframe
val dfStores = spark
          .read
          .format(fileFormat)
          .option("header", true)
          .option("inferSchema", "true")
          .load(inputFileStores)

// check the automatically inferred schema of the dataframe
df.printSchema()
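
Since this cheat sheet targets SQL developers, it is worth noting that a dataframe can also be registered as a temporary view and queried with plain SQL. A minimal sketch (the view name "sales" is just an example):

// register the dataframe as a temporary view so it can be queried with SQL
df.createOrReplaceTempView("sales")

// run a plain SQL query against the view
spark.sql("SELECT Store, Dept, Weekly_Sales FROM sales WHERE Store = 2").show(5)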



// SELECT * FROM df
df.select("*").show()



// SELECT TOP 5 * FROM df
df.select("*").show(5)



// SELECT count(1) FROM df
df.count()



// SELECT Store, Dept, Date, Weekly_Sales, IsHoliday FROM df
df.select("Store", "Dept", "Date", "Weekly_Sales", "IsHoliday").show(5)



// SELECT DISTINCT Store FROM df
df.select("Store").distinct.show(5)



// SELECT DISTINCT Store FROM df ORDER BY Store ASC
df.select("Store").distinct.orderBy(asc("Store")).show(5)



// SELECT DISTINCT Store FROM df ORDER BY Store DESC
df.select("Store").distinct.orderBy(desc("Store")).show(5)



// SELECT * FROM df WHERE Store = 2
df.select("*").filter("Store = 2").show(5)



// SELECT * FROM df WHERE Store = 2 AND Dept = 3
df.select("*").filter("Store = 2 and Dept = 3").show(5)



// DECLARE @count INT
// SET @count = (SELECT COUNT(*) FROM df WHERE Store = 2 AND Dept = 3)
// filter() and where() are aliases and return the same result
val count1 = df.select("*").filter("Store = 2 and Dept = 3").count()
val count2 = df.select("*").where("Store = 2 and Dept = 3").count()

println("Number of rows (Count1): " + count1)
println("Number of rows (Count2): " + count2)



// SELECT * FROM df WHERE Store = 2 OR Store = 3
df.select("*").filter("Store = 2 or Store = 3").show(5)



// SELECT * FROM df WHERE Weekly_Sales > 60000.01
df.select("*").filter("Weekly_Sales > 60000.01").show(5)



// SELECT * FROM df WHERE IsHoliday = 1
// the four filters below are equivalent ways of expressing the same boolean predicate
df.select("*").filter("IsHoliday = 'true'").show(2)
df.select("*").filter("IsHoliday = true").show(2)
df.select("*").filter("IsHoliday IS true").show(2)
df.select("*").filter("IsHoliday IS NOT false").show(2)



// SELECT * FROM df WHERE Weekly_Sales BETWEEN 60000.00 AND 61000.00
df.select("*").filter("Weekly_Sales between 60000.00 and 61000.00").show(5)



// SELECT * FROM df WHERE Date = '01/04/2011'
df.select("*").filter("Date = '01/04/2011'").show(5)



That's it!
Cheers!
