Run
package zx

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by Lee_Rz on 2017/8/30.
  */
object SparkDemo {
  def main(args: Array[String]) {
    Logger.getLogger("org.apache.spark").setLevel(Level.OFF)
    val sc: SparkContext = new SparkContext(new SparkConf().setAppName(this.getClass.getName).setMaster("local[2]"))
    val rdd1: RDD[String] = sc.textFile("C:\\Users\\166\\Desktop\\text.txt") // reads the file line by line; a lazy (transformation) operator
    val key: RDD[(String, Int)] = rdd1.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
    println(key.collect().toBuffer) // collect the results back to the Driver
    sc.stop()
  }
}
Error
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
INFO Slf4jLogger: Slf4jLogger started
INFO Remoting: Starting remoting
INFO Remoting: Remoting started; listening on addresses :[akka.tcp://sparkDriverActorSystem@192.168.0.166:51388]
ERROR Shell: Failed to locate the winutils binary in the hadoop binary path
java.io.IOException: Could not locate executable null\bin\winutils.exe in the Hadoop binaries.
at org.apache.hadoop.util.Shell.getQualifiedBinPath(Shell.java:)
at org.apache.hadoop.util.Shell.getWinUtilsPath(Shell.java:)
at org.apache.hadoop.util.Shell.<clinit>(Shell.java:)
at org.apache.hadoop.util.StringUtils.<clinit>(StringUtils.java:)
at org.apache.hadoop.mapred.FileInputFormat.setInputPaths(FileInputFormat.java:)
at org.apache.spark.SparkContext$$anonfun$hadoopFile$$$anonfun$.apply(SparkContext.scala:)
at org.apache.spark.SparkContext$$anonfun$hadoopFile$$$anonfun$.apply(SparkContext.scala:)
at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$.apply(HadoopRDD.scala:)
at org.apache.spark.rdd.HadoopRDD$$anonfun$getJobConf$.apply(HadoopRDD.scala:)
at scala.Option.map(Option.scala:)
at org.apache.spark.rdd.HadoopRDD.getJobConf(HadoopRDD.scala:)
at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$partitions$.apply(RDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$partitions$.apply(RDD.scala:)
at scala.Option.getOrElse(Option.scala:)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:)
at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$partitions$.apply(RDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$partitions$.apply(RDD.scala:)
at scala.Option.getOrElse(Option.scala:)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:)
at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$partitions$.apply(RDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$partitions$.apply(RDD.scala:)
at scala.Option.getOrElse(Option.scala:)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:)
at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$partitions$.apply(RDD.scala:)
at org.apache.spark.rdd.RDD$$anonfun$partitions$.apply(RDD.scala:)
at scala.Option.getOrElse(Option.scala:)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:)
at org.apache.spark.Partitioner$.defaultPartitioner(Partitioner.scala:)
at org.apache.spark.rdd.PairRDDFunctions$$anonfun$reduceByKey$.apply(PairRDDFunctions.scala:)
at org.apache.spark.rdd.PairRDDFunctions$$anonfun$reduceByKey$.apply(PairRDDFunctions.scala:)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:)
at org.apache.spark.rdd.PairRDDFunctions.reduceByKey(PairRDDFunctions.scala:)
at zx.SparkDemo$.main(SparkDemo.scala:)
at zx.SparkDemo.main(SparkDemo.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:)
at java.lang.reflect.Method.invoke(Method.java:)
at com.intellij.rt.execution.application.AppMain.main(AppMain.java:)
INFO FileInputFormat: Total input paths to process :
INFO deprecation: mapred.tip.id is deprecated. Instead, use mapreduce.task.id
INFO deprecation: mapred.task.id is deprecated. Instead, use mapreduce.task.attempt.id
INFO deprecation: mapred.task.is.map is deprecated. Instead, use mapreduce.task.ismap
INFO deprecation: mapred.task.partition is deprecated. Instead, use mapreduce.task.partition
INFO deprecation: mapred.job.id is deprecated. Instead, use mapreduce.job.id
ArrayBuffer((are,), (hello,), (any,), (ok,), (world,), (me,), (alone,), (you,), (no,), (believie,), (more,))
INFO RemoteActorRefProvider$RemotingTerminator: Shutting down remote daemon.
Process finished with exit code
Checking showed that winutils.exe already exists under Hadoop's bin directory. Inspecting the Hadoop path settings then revealed that the environment variable had not been created in the strict expected format: the correct form is HADOOP_HOME=....... Many frameworks in the Hadoop ecosystem depend on Hadoop, so their configuration files export the Hadoop path under the default variable name HADOOP_HOME; if the variable is named anything else, Hadoop's Shell class resolves the home directory to null, which is why the lookup for null\bin\winutils.exe fails as above.
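If editing the system environment variables is inconvenient, the Hadoop home can also be set programmatically before the SparkContext is created, because Shell checks the hadoop.home.dir system property before falling back to the HADOOP_HOME variable. A minimal sketch, assuming Hadoop is unpacked at C:\hadoop (a placeholder path; substitute the directory that actually contains bin\winutils.exe):

import org.apache.spark.{SparkConf, SparkContext}

object WinutilsFix {
  def main(args: Array[String]): Unit = {
    // Must run before any Hadoop class is loaded: Shell resolves the
    // winutils path in its static initializer and caches the result.
    // "C:\\hadoop" is a placeholder for the HADOOP_HOME-style directory.
    System.setProperty("hadoop.home.dir", "C:\\hadoop")
    val sc = new SparkContext(new SparkConf().setAppName("WinutilsFix").setMaster("local[2]"))
    // ... same word-count logic as in SparkDemo above ...
    sc.stop()
  }
}

Either way, the setting has to take effect before the first Hadoop call (here, sc.textFile); otherwise Shell's static initializer has already failed with the IOException shown in the log.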