import findspark

# findspark.init must run BEFORE any pyspark import: it adds the local
# Spark installation to sys.path so the pyspark modules can be found.
findspark.init('/Users/donghua/spark-2.4.0-bin-hadoop2.7')

from pyspark import SparkContext
from pyspark.sql import SparkSession

# SparkContext(master, appName): 'local' runs everything in a single JVM.
sc = SparkContext('local', 'handson Spark')

# --- RDD demo: scale daily visitor counts to yearly totals ---------------
visitors = [10, 3, 35, 25, 41, 9, 29]
rdd_visitors = sc.parallelize(visitors)
array_visitors_yearly = rdd_visitors.map(lambda x: x * 365).collect()
print(array_visitors_yearly)

# --- DataFrame demo ------------------------------------------------------
# FIX: the original called SparkSession(sc).builder.getOrCreate(), which
# constructs a throwaway session object only to reach the class-level
# `builder` attribute. The documented idiom is the classproperty-style
# builder; getOrCreate() reuses the SparkContext created above.
spark = SparkSession.builder.getOrCreate()

text_file = spark.read.text('file:///Users/donghua/spark-2.4.0-bin-hadoop2.7/README.md')

# FIX: bare `count()` / `first()` expressions in a script discard their
# results (they only echo in an interactive shell) — print them instead.
print(text_file.count())   # total number of lines in README.md
print(text_file.first())   # first Row of the DataFrame

# Keep only lines whose `value` column contains the literal substring "Spark".
lines_with_spark = text_file.filter(text_file.value.contains("Spark"))
lines_with_spark.show(5)
print(lines_with_spark.count())

sc.stop()
Output:
Donghuas-MacBook-Air:spark-2.4.0-bin-hadoop2.7 donghua$ python chap1.py
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
2019-03-27 20:44:18 WARN Utils:66 - Service 'SparkUI' could not bind on port 4040. Attempting port 4041.
[3650, 1095, 12775, 9125, 14965, 3285, 10585]
2019-03-27 20:44:26 WARN NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
+--------------------+
| value|
+--------------------+
| # Apache Spark|
|Spark is a fast a...|
|rich set of highe...|
|and Spark Streami...|
|You can find the ...|
+--------------------+
only showing top 5 rows
Some of the code is adapted from the "Hands-On PySpark for Big Data Analysis" video course.
No comments:
Post a Comment