scala> val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)

// NOTE: `group` is a reserved keyword in HiveQL, so the column name must be
// escaped with backticks or CREATE TABLE fails to parse.
scala> sqlContext.sql("create table IF NOT EXISTS case2 (case_id string, province string, city string, `group` string, infection_case string, confirmed float, latitude float, longitude float) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n'")

// Load the local CSV into the Hive table.
// NOTE(review): if Case.csv has a header row it will be loaded as data too —
// verify, and add TBLPROPERTIES ('skip.header.line.count'='1') if needed.
scala> sqlContext.sql("LOAD DATA LOCAL INPATH '/home/scott/Case.csv' INTO TABLE case2")

// Aggregate confirmed cases per province and write a single CSV file.
// Fixed: prompt typo (`acala>`), and use sqlContext.sql(...) — a bare sql(...)
// is not in scope in this shell session where HiveContext was built manually.
// The alias gives the output CSV a clean header instead of "sum(confirmed)".
scala> sqlContext.sql(""" select province, sum(confirmed) as confirmed
     |   from case2
     |   group by province""").coalesce(1).write.option("header","true").option("sep",",").mode("overwrite").csv("/home/scott/dd")
[scott@centos ~]$ cd /home/scott/dd
# The part-file name embeds a random UUID that changes on every Spark run;
# a glob matches whatever name this run produced. coalesce(1) guarantees
# exactly one part file, so the glob expands to a single name.
[scott@centos dd]$ mv part-*.csv case2.csv
# Keep a copy alongside the original input for convenience.
[scott@centos dd]$ cp ./case2.csv /home/scott/case2.csv