
I need to convert SQL rows, stored in a var named rows, into a vector (Spark Scala: convert an RDD of SQL Rows to a Vector). I use the following steps:

val df = sqlContext.sql("SELECT age,gender FROM test.test2") 
val rows: org.apache.spark.rdd.RDD[org.apache.spark.sql.Row] = df.rdd 
val doubVals = rows.map{ row => row.getDouble(0) } 
val vector = Vectors.dense{ doubVals.collect} 

I run the steps above, but they throw a ClassNotFoundException:

scala> val vector = Vectors.dense{ doubVals.collect} 
WARN 2017-07-14 02:12:09,477 org.apache.spark.scheduler.TaskSetManager: 
Lost task 0.0 in stage 2.0 (TID 7, 192.168.110.200): 
java.lang.ClassNotFoundException: 
    $line31.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$anonfun$1 
    at java.net.URLClassLoader.findClass(URLClassLoader.java:381) 
    at java.lang.ClassLoader.loadClass(ClassLoader.java:424) 
    at java.lang.ClassLoader.loadClass(ClassLoader.java:357) 
    at java.lang.Class.forName0(Native Method) 
    at java.lang.Class.forName(Class.java:348) 
    at org.apache.spark.serializer.JavaDeserializationStream$$anon$1.resolveClass(JavaSerializer.scala:67) 
    at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:1826) 
    at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1713) 
    at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2000) 
    at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535) 
    at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2245) 
    at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169) 
    at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027) 
    at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535) 
    at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2245) 
    at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169) 
    at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027) 
    at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535) 
    at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2245) 
    at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169) 
    at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027) 
    at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535) 
    at java.io.ObjectInputStream.readObject(ObjectInputStream.java:422) 
    at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:75) 
    at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:114) 
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66) 
    at org.apache.spark.scheduler.Task.run(Task.scala:86) 
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274) 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) 
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) 
    at java.lang.Thread.run(Thread.java:748) 

    [Stage 2:>               (0 + 3)/7]
    ERROR 2017-07-14 02:12:09,787 org.apache.spark.scheduler.TaskSetManager: 
    Task 2 in stage 2.0 failed 4 times; aborting job 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 
in stage 2.0 failed 4 times, most recent failure: Lost task 2.3 in stage 
    2.0 (TID 21, 192.168.110.200): java.lang.ClassNotFoundException: $anonfun$1 
    at java.net.URLClassLoader.findClass(URLClassLoader.java:381) 
    at java.lang.ClassLoader.loadClass(ClassLoader.java:424) 
    at java.lang.ClassLoader.loadClass(ClassLoader.java:357) 
    at java.lang.Class.forName0(Native Method) 
    at java.lang.Class.forName(Class.java:348) 
    at org.apache.spark.serializer.JavaDeserializationStream$$anon$1.resolveClass(JavaSerializer.scala:67) 
    at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:1826) 
    at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1713) 
    at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2000) 
    at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535) 
    at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2245) 
    at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169) 
    at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027) 
    at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535) 
    at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2245) 
    at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169) 
    at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027) 
    at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535) 
    at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2245) 
    at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2169) 
    at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2027) 
    at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1535) 
    at java.io.ObjectInputStream.readObject(ObjectInputStream.java:422) 
    at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:75) 
    at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:114) 
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66) 
    at org.apache.spark.scheduler.Task.run(Task.scala:86) 
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274) 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) 
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) 
    at java.lang.Thread.run(Thread.java:748) 

It throws many more exceptions like the ones above, but the error it keeps giving me is the ClassNotFoundException.

Could you please help me resolve this error?
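
For what it is worth, a ClassNotFoundException on a class such as $line31.$read$$iw...$$anonfun$1 usually means the executors cannot load an anonymous function that was defined inside the spark-shell REPL, rather than pointing to a problem with the conversion itself. A minimal workaround sketch (not the original code; it assumes test.test2 exists and that age is a double column) is to do the Row-to-Double mapping on the driver after collect(), so no REPL-defined closure has to be deserialized on the executors:

import org.apache.spark.mllib.linalg.Vectors

val df = sqlContext.sql("SELECT age, gender FROM test.test2")
// collect() brings the Rows to the driver; the map below runs locally,
// so nothing defined in the REPL is shipped to the executors
val ages: Array[Double] = df.collect().map(_.getDouble(0))
val vector = Vectors.dense(ages)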


Can you give us some hints for debugging? Also, what does your code look like after creating the vector? –


OK, will update the question. – Amalo


Did you forget this: 'import org.apache.spark.mllib.linalg.Vectors'? – philantrovert

Answer


Have a look at the following (it does not give me any error):

scala> val df = Seq(2.0,3.0,3.2,2.3,1.2).toDF("col") 
df: org.apache.spark.sql.DataFrame = [col: double] 

scala> import org.apache.spark.mllib.linalg.Vectors 
import org.apache.spark.mllib.linalg.Vectors 

scala> val rows = df.rdd 
rows: org.apache.spark.rdd.RDD[org.apache.spark.sql.Row] = MapPartitionsRDD[3] at rdd at <console>:31 

scala> val doubVals = rows.map{ row => row.getDouble(0) } 
doubVals: org.apache.spark.rdd.RDD[Double] = MapPartitionsRDD[4] at map at <console>:33 

scala> val vector = Vectors.dense{ doubVals.collect} 
vector: org.apache.spark.mllib.linalg.Vector = [2.0,3.0,3.2,2.3,1.2] 

This works for me. Can you post the complete error message?
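
For reference, here is a sketch of the same chain applied to the original query from the question, together with the import pointed out in the comments; it assumes that test.test2 exists and that age is stored as a double, and adds a couple of spot checks before the final collect:

import org.apache.spark.mllib.linalg.Vectors

val df = sqlContext.sql("SELECT age, gender FROM test.test2")
df.printSchema()                               // confirm that age really is a double

val doubVals = df.rdd.map(_.getDouble(0))      // RDD[Double], evaluated on the executors
doubVals.take(5).foreach(println)              // spot-check a few values before collecting everything

val vector = Vectors.dense(doubVals.collect()) // local dense Vector, as in the session above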


What should I do with this? Where is my error? – Amalo


My answer shows that there is no error when you do it this way. Did you do exactly the same thing? –


I followed these steps and I get the same error. – Amalo
