I'm running into an error while trying to save a PySpark DataFrame to a parquet file. The directory is located on an external volume attached to the workspace I'm working on; Spark creates the empty test_2.parquet folder itself, but then throws the error. I'm running Spark locally.
I have no problem converting this PySpark DataFrame to pandas and saving it with pandas, but I would like to be able to do it through Spark if possible.
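For reference, the pandas route that does work looks roughly like this (a minimal sketch; toPandas() collects the whole DataFrame to the driver, which is acceptable here since Spark runs locally):

# Collect to the driver and write a single parquet file with pandas/pyarrow
pdf = df.toPandas()
pdf.to_parquet("/PATH/test_2.parquet")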
Code I'm running (paths are obfuscated):
# Read an existing parquet dataset, then write it back out to the external volume
df = spark.read.parquet(different_path)
df.write.mode("overwrite").parquet("/PATH/test_2.parquet")
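In case it helps with reproducing, the same write path can be exercised without my source data, using Spark's built-in range DataFrame (hypothetical placeholder path, same session assumed):

# Hypothetical minimal repro: writes a trivial DataFrame through the same committer path
spark.range(10).write.mode("overwrite").parquet("/PATH/test_minimal.parquet")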
Output of ls -l for the test_2.parquet directory, which suggests that all users should have write permission on it:
drwxrwxrwx 2 root ubuntu 0 Feb 17 07:56 test_2.parquet
Error:
Py4JJavaError Traceback (most recent call last)
/SCRIPT_PATH/script.py in line 2
----> 69 df.write.mode("overwrite").parquet("/PATH/test_2.parquet")
File /opt/conda/envs/default/lib/python3.9/site-packages/pyspark/sql/readwriter.py:1721, in DataFrameWriter.parquet(self, path, mode, partitionBy, compression)
1719 self.partitionBy(partitionBy)
1720 self._set_opts(compression=compression)
-> 1721 self._jwrite.parquet(path)
File /opt/conda/envs/default/lib/python3.9/site-packages/py4j/java_gateway.py:1322, in JavaMember.__call__(self, *args)
1316 command = proto.CALL_COMMAND_NAME +\
1317 self.command_header +\
1318 args_command +\
1319 proto.END_COMMAND_PART
1321 answer = self.gateway_client.send_command(command)
-> 1322 return_value = get_return_value(
1323 answer, self.gateway_client, self.target_id, self.name)
1325 for temp_arg in temp_args:
1326 if hasattr(temp_arg, "_detach"):
File /opt/conda/envs/default/lib/python3.9/site-packages/pyspark/errors/exceptions/captured.py:179, in capture_sql_exception.<locals>.deco(*a, **kw)
177 def deco(*a: Any, **kw: Any) -> Any:
178 try:
--> 179 return f(*a, **kw)
180 except Py4JJavaError as e:
181 converted = convert_exception(e.java_exception)
File /opt/conda/envs/default/lib/python3.9/site-packages/py4j/protocol.py:326, in get_return_value(answer, gateway_client, target_id, name)
324 value = OUTPUT_CONVERTER[type](answer[2:], gateway_client)
325 if answer[1] == REFERENCE_TYPE:
--> 326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
331 "An error occurred while calling {0}{1}{2}. Trace:\n{3}\n".
332 format(target_id, ".", name, value))
Py4JJavaError: An error occurred while calling o55.parquet.
: ExitCodeException exitCode=1: chmod: changing permissions of '/PATH/test_2.parquet': Operation not permitted
at org.apache.hadoop.util.Shell.runCommand(Shell.java:1007)
at org.apache.hadoop.util.Shell.run(Shell.java:900)
at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1212)
at org.apache.hadoop.util.Shell.execCommand(Shell.java:1306)
at org.apache.hadoop.util.Shell.execCommand(Shell.java:1288)
at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:978)
at org.apache.hadoop.fs.RawLocalFileSystem.mkOneDirWithMode(RawLocalFileSystem.java:660)
at org.apache.hadoop.fs.RawLocalFileSystem.mkdirsWithOptionalPermission(RawLocalFileSystem.java:700)
at org.apache.hadoop.fs.RawLocalFileSystem.mkdirs(RawLocalFileSystem.java:672)
at org.apache.hadoop.fs.RawLocalFileSystem.mkdirsWithOptionalPermission(RawLocalFileSystem.java:699)
at org.apache.hadoop.fs.RawLocalFileSystem.mkdirs(RawLocalFileSystem.java:672)
at org.apache.hadoop.fs.RawLocalFileSystem.mkdirsWithOptionalPermission(RawLocalFileSystem.java:699)
at org.apache.hadoop.fs.RawLocalFileSystem.mkdirs(RawLocalFileSystem.java:672)
at org.apache.hadoop.fs.ChecksumFileSystem.mkdirs(ChecksumFileSystem.java:788)
at org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.setupJob(FileOutputCommitter.java:356)
at org.apache.spark.internal.io.HadoopMapReduceCommitProtocol.setupJob(HadoopMapReduceCommitProtocol.scala:188)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.writeAndCommit(FileFormatWriter.scala:269)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeWrite(FileFormatWriter.scala:304)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:190)
at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:190)
at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:113)
at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:111)
at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:125)
at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:107)
at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:125)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:201)
at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:108)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:66)
at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:107)
at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:98)
at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:461)
at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:76)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:461)
at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:32)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:32)
at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:32)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:437)
at org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:98)
at org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:85)
at org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:83)
at org.apache.spark.sql.execution.QueryExecution.assertCommandExecuted(QueryExecution.scala:142)
at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:869)
at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:391)
at org.apache.spark.sql.DataFrameWriter.saveInternal(DataFrameWriter.scala:364)
at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:243)
at org.apache.spark.sql.DataFrameWriter.parquet(DataFrameWriter.scala:802)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:566)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
at py4j.ClientServerConnection.run(ClientServerConnection.java:106)
at java.base/java.lang.Thread.run(Thread.java:829)
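From the trace, the write itself never starts: the failure happens while Hadoop's FileOutputCommitter sets up the job and RawLocalFileSystem tries to chmod the output directory. A quick way to check whether that same operation is allowed for my user outside Spark (note that POSIX chmod requires owning the file, not just having write permission, and the directory above is owned by root):

import os
import stat

# Attempt a chmod like the one Hadoop's RawLocalFileSystem performs; this is
# expected to raise PermissionError if the current user doesn't own the
# directory, even though it is world-writable (drwxrwxrwx).
os.chmod("/PATH/test_2.parquet", stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)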