"""Demonstrate writing RDDs to text files with Spark's saveAsTextFile."""
from pyspark import SparkConf, SparkContext
import os

# Point PySpark at the local interpreter and Hadoop installation on this
# Windows machine (required for the winutils-based filesystem writes).
os.environ['PYSPARK_PYTHON'] = "C:\\Users\\22525\\anaconda3\\envs\\pyspark\\python.exe"
os.environ['HADOOP_HOME'] = 'D:/dev/hadoop-3.0.0'

conf = SparkConf().setMaster("local").setAppName("test_spark")
sc = SparkContext(conf=conf)
try:
    # Prepare three sample RDDs with different element shapes:
    # plain ints, (key, value) tuples, and nested lists.
    rdd1 = sc.parallelize([1, 2, 3, 4, 5])
    rdd2 = sc.parallelize([('hello', 1), ('spark', 5), ('hi', 7)])
    rdd3 = sc.parallelize([[1, 3, 5], [7, 9, 11], [13, 15, 17]])

    # Write each RDD out as text files. NOTE: saveAsTextFile creates the
    # target directory and raises an error if it already exists, so remove
    # d:/test1..test3 before re-running.
    rdd1.saveAsTextFile("d:/test1")
    rdd2.saveAsTextFile("d:/test2")
    rdd3.saveAsTextFile("d:/test3")
finally:
    # Always stop the SparkContext so the backing JVM shuts down cleanly,
    # even if one of the writes above fails.
    sc.stop()