spark-submit \
  --master yarn \
  --deploy-mode cluster \
  --name pyspark_job \
  --driver-memory 2G \
  --driver-cores 2 \
  --executor-memory 12G \
  --executor-cores 5 \
  --num-executors 10 \
  --conf spark.yarn.executor.memoryOverhead=4096 \
  --conf spark.task.maxFailures=36 \
  --conf spark.driver.maxResultSize=0 \
  --conf spark.network.timeout=800s \
  --conf spark.scheduler.listenerbus.eventqueue.size=500000 \
  --conf spark.speculation=true \
  --py-files lib.zip,lib1.zip,lib2.zip \
  spark_test.py
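If the job is instead launched as a plain Python process in client mode, the executor-side settings above can be supplied programmatically through SparkConf. This is only an illustrative sketch mirroring the values from the command line; note that driver memory and cores cannot be changed this way once the driver JVM is running, so those still belong on the spark-submit line.

from pyspark import SparkConf, SparkContext

# Mirrors the executor-side settings from the spark-submit command above.
conf = (SparkConf()
        .setMaster('yarn')
        .setAppName('pyspark_job')
        .set('spark.executor.memory', '12G')
        .set('spark.executor.cores', '5')
        .set('spark.executor.instances', '10')
        .set('spark.yarn.executor.memoryOverhead', '4096')
        .set('spark.task.maxFailures', '36')
        .set('spark.driver.maxResultSize', '0')
        .set('spark.network.timeout', '800s')
        .set('spark.scheduler.listenerbus.eventqueue.size', '500000')
        .set('spark.speculation', 'true'))

sc = SparkContext(conf=conf)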
import sys

import pyspark
from pyspark.sql import SQLContext

sc = pyspark.SparkContext()
sqlContext = SQLContext(sc)

# Ship the zipped dependencies to every executor. This is redundant when the
# job is submitted with --py-files as above, but it keeps the script usable
# when launched directly.
sc.addPyFile('lib.zip')
sc.addPyFile('lib1.zip')
sc.addPyFile('lib2.zip')

# Import from the shipped archives only after they have been added; the module
# names must match the packages inside lib.zip, lib1.zip, and lib2.zip.
from lib import XX
from lib1 import XX1
from lib2 import XX2
# ...
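To confirm the archives actually reach the executors, a quick smoke test is to import a shipped module inside a task rather than only on the driver. A minimal sketch, assuming lib.zip contains an importable top-level package named lib (the probe function is a hypothetical helper, not part of the original script):

def probe(_):
    # Runs on an executor; the import succeeds only if lib.zip was shipped
    # there and placed on sys.path by --py-files / addPyFile.
    import lib
    return lib.__name__

# Each collected element should be the module name, proving the executors
# can import it, not just the driver.
print(sc.parallelize(range(2), 2).map(probe).collect())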