I am trying to write a DataFrame to a Cassandra table using the Spark Cassandra Connector, but I am encountering the error below and have not been able to find a solution.
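For context, the failing write looks roughly like the sketch below. This is a minimal reconstruction: only the .options(table = "struct_str", keyspace = "acrs") call and .save() are visible in the traceback; the DataFrame df, its hex-string column, and the connection host are assumptions.

    # Minimal sketch of the write, reconstructed from the traceback.
    # Only the options/save calls appear in the traceback; everything else
    # (df, its schema, the host) is assumed for illustration.
    from pyspark.sql import SparkSession

    spark = (SparkSession.builder
             .appName("cassandra-write")
             .config("spark.cassandra.connection.host", "127.0.0.1")  # assumed host
             .getOrCreate())

    # Assumed DataFrame: a single column of hex strings like "0x000003e7"
    df = spark.createDataFrame([("0x000003e7",)], ["data"])

    (df.write
       .format("org.apache.spark.sql.cassandra")
       .options(table="struct_str", keyspace="acrs")  # from the traceback
       .mode("append")
       .save())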


Traceback (most recent call last):
  File "py_spark.py", line 235, in <module>
    .options(table = "struct_str", keyspace = "acrs")\
  File "/usr/lib/python3.6/site-packages/pyspark/sql/readwriter.py", line 732, in save
    self._jwrite.save()
  File "/usr/lib/python3.6/site-packages/py4j/java_gateway.py", line 1257, in __call__
    answer, self.gateway_client, self.target_id, self.name)
  File "/usr/lib/python3.6/site-packages/pyspark/sql/utils.py", line 63, in deco
    return f(*a, **kw)
  File "/usr/lib/python3.6/site-packages/py4j/protocol.py", line 328, in get_return_value
    format(target_id, ".", name), value)
py4j.protocol.Py4JJavaError: An error occurred while calling o654.save.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 1.0 failed 1 times, most recent failure: Lost task 0.0 in stage 1.0 (TID 8, localhost, executor driver): com.datastax.spark.connector.types.TypeConversionException: Cannot convert object 0x000003e7 of type class java.lang.String to java.nio.ByteBuffer.
    at com.datastax.spark.connector.types.TypeConverter$$anonfun$convert$1.apply(TypeConverter.scala:43)
    at com.datastax.spark.connector.types.TypeConverter$ByteBufferConverter$$anonfun$convertPF$11.applyOrElse(TypeConverter.scala:256)
    at com.datastax.spark.connector.types.TypeConverter$class.convert(TypeConverter.scala:41)
    at com.datastax.spark.connector.types.TypeConverter$ByteBufferConverter$.com$datastax$spark$connector$types$NullableTypeConverter$$super$convert(TypeConverter.scala:253)
    at com.datastax.spark.connector.types.NullableTypeConverter$class.convert(TypeConverter.scala:54)
    at com.datastax.spark.connector.types.TypeConverter$ByteBufferConverter$.convert(TypeConverter.scala:253)
    at com.datastax.spark.connector.types.TypeConverter$OptionToNullConverter$$anonfun$convertPF$36.applyOrElse(TypeConverter.scala:796)
    at com.datastax.spark.connector.types.TypeConverter$class.convert(TypeConverter.scala:41)
    at com.datastax.spark.connector.types.TypeConverter$OptionToNullConverter.com$datastax$spark$connector$types$NullableTypeConverter$$super$convert(TypeConverter.scala:779)
    at com.datastax.spark.connector.types.NullableTypeConverter$class.convert(TypeConverter.scala:54)
    at com.datastax.spark.connector.types.TypeConverter$OptionToNullConverter.convert(TypeConverter.scala:779)
    at com.datastax.spark.connector.writer.SqlRowWriter$$anonfun$readColumnValues$1.apply$mcVI$sp(SqlRowWriter.scala:26)
    at scala.collection.immutable.Range.foreach$mVc$sp(Range.scala:160)
    at com.datastax.spark.connector.writer.SqlRowWriter.readColumnValues(SqlRowWriter.scala:24)
    at com.datastax.spark.connector.writer.SqlRowWriter.readColumnValues(SqlRowWriter.scala:12)
    at com.datastax.spark.connector.writer.BoundStatementBuilder.bind(BoundStatementBuilder.scala:99)
    at com.datastax.spark.connector.writer.GroupingBatchBuilder.next(GroupingBatchBuilder.scala:106)
    at com.datastax.spark.connector.writer.GroupingBatchBuilder.next(GroupingBatchBuilder.scala:31)
    at scala.collection.Iterator$class.foreach(Iterator.scala:891)
    at com.datastax.spark.connector.writer.GroupingBatchBuilder.foreach(GroupingBatchBuilder.scala:31)
    at com.datastax.spark.connector.writer.TableWriter$$anonfun$writeInternal$1.apply(TableWriter.scala:233)
    at com.datastax.spark.connector.writer.TableWriter$$anonfun$writeInternal$1.apply(TableWriter.scala:210)
    at com.datastax.spark.connector.cql.CassandraConnector$$anonfun$withSessionDo$1.apply(CassandraConnector.scala:112)
    at com.datastax.spark.connector.cql.CassandraConnector$$anonfun$withSessionDo$1.apply(CassandraConnector.scala:111)
    at com.datastax.spark.connector.cql.CassandraConnector.closeResourceAfterUse(CassandraConnector.scala:145)
    at com.datastax.spark.connector.cql.CassandraConnector.withSessionDo(CassandraConnector.scala:111)
    at com.datastax.spark.connector.writer.TableWriter.writeInternal(TableWriter.scala:210)
    at com.datastax.spark.connector.writer.TableWriter.insert(TableWriter.scala:197)
    at com.datastax.spark.connector.writer.TableWriter.write(TableWriter.scala:183)
    at com.datastax.spark.connector.RDDFunctions$$anonfun$saveToCassandra$1.apply(RDDFunctions.scala:36)
    at com.datastax.spark.connector.RDDFunctions$$anonfun$saveToCassandra$1.apply(RDDFunctions.scala:36)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:121)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
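From the exception, my guess is that the target column in struct_str is a Cassandra blob (the connector binds it as java.nio.ByteBuffer), while the corresponding DataFrame column is a StringType holding hex literals such as "0x000003e7", and the connector does not convert strings to blobs. Would decoding the hex into a BinaryType column before writing, roughly as sketched below, be the right approach? (The column name data is an assumption; only the value 0x000003e7 appears in the error.)

    from pyspark.sql import functions as F

    # Assumption: the offending column is named `data` and holds hex strings
    # like "0x000003e7". unhex() expects the digits without the "0x" prefix,
    # so strip it first; unhex() returns BinaryType, which the connector can
    # bind to a Cassandra blob column.
    df_bin = df.withColumn(
        "data",
        F.unhex(F.regexp_replace(F.col("data"), "^0x", ""))
    )
    # df_bin is then written with the same .format(...).options(...).save() as above.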

pyspark
asked on Stack Overflow Jul 22, 2019 by Suyash Raj • edited Jul 22, 2019 by thebluephantom
