// Spark driver pipeline: reads smart-meter profile readings from Cassandra,
// attributes energy to appliances (baseload, then fridge), optionally persists
// interim stages, and writes the final monthly breakdown back to Cassandra.
// NOTE(review): source was recovered from a line-mangled extraction ('!' were
// newlines); code tokens are unchanged, only formatting and comments added.

// CLI/config-driven parameters: month under analysis, target keyspace, and a
// flag controlling whether intermediate stage output is also saved.
val (month, keyspace, interimSave) = parseParams(args, config)

val sc = new SparkContext(sparkConf)

// Pull profile reads for the requested month.
// NOTE(review): this filter runs in Spark after a full table scan of
// ser.profile_reads — if "month" is part of the Cassandra key, a server-side
// pushdown (connector .where("month = ?", month)) would avoid reading every
// partition; confirm the table schema before changing.
val readings = sc.cassandraTable("ser", "profile_reads").filter({r: CassandraRow => r.getString("month") == month })

// Group all readings per meter point.
// NOTE(review): the key column is spelled "mxpn" here and in the interim
// column list below, but "mpxn" in the final output columns — presumably one
// of these is a typo; verify against the actual Cassandra schemas.
val grouped = readings.groupBy({ r: CassandraRow => r.getLong("mxpn")})

// Eta-expanded reference so the (key, readings) tuple can be fed via .tupled.
// NOTE(review): as a def this re-creates the function value on every access;
// a val would be equivalent and cheaper, but behavior is unchanged either way.
def buildStructurePartial = buildStructure _

// Build one MonthlyBreakdown per meter/month; buildStructure evidently returns
// a collection, hence the .flatMap(m => m) flatten.
val initial = grouped.map(buildStructurePartial.tupled).flatMap(m => m)

// Stage 1: attribute baseload consumption.
// NOTE(review): markProcessed mutates x in place and the same x is returned —
// relies on the RDD lineage never re-observing the pre-mutation object;
// confirm MonthlyBreakdown is safe to mutate inside map.
val baseload = initial.map({ x: MonthlyBreakdown => {
  x.markProcessed(getBaseload(x), "appliances", "baseload")
  x }})

// Stage 2: attribute fridge consumption on top of the baseload-marked data.
val fridged = baseload.map( { x: MonthlyBreakdown => {
  x.markProcessed(getFridge(x), "appliances", "fridge")
  x }})

// Optionally persist the per-stage breakdown for debugging/audit.
// NOTE(review): the double .flatMap(identity) flattens two nesting levels, so
// interimStage apparently returns a collection of collections — confirm.
if( interimSave ) {
  val fridgeresults = fridged.map({ x: MonthlyBreakdown => x.interimStage("fridge")}).flatMap(identity).flatMap(identity)
  fridgeresults.saveToCassandra(keyspace, "interim_breakdown", Seq("mxpn", "stage", "month", "readdate", "breakdown"))
}

// Final rollup: one flattened result set across all breakdowns.
val results = fridged.map({x: MonthlyBreakdown => x.rollupResults()}).flatMap(identity)

// NOTE(review): statement truncated in the extracted source — the column Seq
// (and the closing of this call) continues beyond the visible text. Recover
// the remaining columns from the original file before using this code.
results.saveToCassandra( keyspace, "energy_breakdown", Seq("business_partner_id", "premise_id", "mpxn", "customer_type", "start_date", "end_date", "group", "energy_type",
// [extraction residue — surrounding article prose, not code:]
// (3) Data science productionising Spark allows us to chain different algorithm