diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-pyspark-sql-examples.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-pyspark-sql-examples.sh
index 5bfe016efe2..3394e7edcd8 100755
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-pyspark-sql-examples.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-pyspark-sql-examples.sh
@@ -2,11 +2,13 @@
 status_2_k8s_pyspark_sql_basic=1
 status_3_k8s_pyspark_sql_e2e=1
-SPARK_LOCAL_IP=192.168.0.112
+SPARK_LOCAL_IP=$LOCAL_IP
 DB_PATH=/ppml/trusted-big-data-ml/work/data/sqlite_example/100w.db

 if [ $status_2_k8s_pyspark_sql_basic -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
 export SPARK_LOCAL_IP=$SPARK_LOCAL_IP && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
@@ -36,12 +38,16 @@ SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
 --conf spark.kubernetes.sgx.mem=32g \
 --conf spark.kubernetes.sgx.jvm.mem=16g \
 --verbose \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/sql/basic.py" 2>&1 > k8s-pyspark-sql-basic-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/sql/basic.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee k8s-pyspark-sql-basic-sgx.log
 fi
 status_2_k8s_pyspark_sql_basic=$(echo $?)

 if [ $status_3_k8s_pyspark_sql_e2e -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
 export SPARK_LOCAL_IP=$SPARK_LOCAL_IP && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' -Xmx10g \
@@ -70,7 +76,9 @@ SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
 --conf spark.kubernetes.sgx.jvm.mem=16g \
 --verbose \
 local:///ppml/trusted-big-data-ml/work/examples/customer_profile.py \
- --db_path $DB_PATH" 2>&1 > k8s-pyspark-sql-e2e-100w-sgx.log
+ --db_path $DB_PATH" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee k8s-pyspark-sql-e2e-100w-sgx.log
 fi
 status_3_k8s_pyspark_sql_e2e=$(echo $?)
diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-spark-pi.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-spark-pi.sh
index ee841e89eb4..ba13201f5bd 100644
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-spark-pi.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-spark-pi.sh
@@ -1,9 +1,11 @@
 #!/bin/bash
 status_1_k8s_spark_pi=1
-SPARK_LOCAL_IP=192.168.0.112
+SPARK_LOCAL_IP=$LOCAL_IP

 if [ $status_1_k8s_spark_pi -ne 0 ]; then
- SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
+ cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
 export SPARK_LOCAL_IP=$SPARK_LOCAL_IP && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
@@ -34,7 +36,9 @@ if [ $status_1_k8s_spark_pi -ne 0 ]; then
 --conf spark.kubernetes.sgx.jvm.mem=16g \
 --class org.apache.spark.examples.SparkPi \
 --verbose \
- local:///ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" 2>&1 > k8s-spark-pi-sgx.log
+ local:///ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee k8s-spark-pi-sgx.log
 fi
 status_1_k8s_spark_pi=$(echo $?)
diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-spark-sql-examples.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-spark-sql-examples.sh
index 699505dc315..1f343b7d293 100644
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-spark-sql-examples.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/k8s-spark-sql-examples.sh
@@ -2,11 +2,13 @@
 status_4_k8s_spark_sql_example=1
 status_5_k8s_spark_sql_e2e=1
-SPARK_LOCAL_IP=192.168.0.112
+SPARK_LOCAL_IP=$LOCAL_IP
 DB_PATH=/ppml/trusted-big-data-ml/work/data/sqlite_example/100w.db

 if [ $status_4_k8s_spark_sql_example -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
 export SPARK_LOCAL_IP=$SPARK_LOCAL_IP && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
@@ -36,12 +38,16 @@ SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
 --conf spark.kubernetes.sgx.mem=32g \
 --conf spark.kubernetes.sgx.jvm.mem=16g \
 --class org.apache.spark.examples.sql.SparkSQLExample \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" 2>&1 > k8s-spark-sql-example-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee k8s-spark-sql-example-sgx.log
 fi
 status_4_k8s_spark_sql_example=$(echo $?)

 if [ $status_5_k8s_spark_sql_e2e -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
 export SPARK_LOCAL_IP=$SPARK_LOCAL_IP && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*:/ppml/trusted-big-data-ml/work/data/sqlite_example/spark-example-sql-e2e.jar' -Xmx10g \
@@ -70,7 +76,9 @@ SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && \
 --conf spark.kubernetes.sgx.jvm.mem=16g \
 --class test.SqlExample \
 /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-example-sql-e2e.jar \
- $DB_PATH" 2>&1 > k8s-spark-sql-e2e-100w-sgx.log
+ $DB_PATH" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee k8s-spark-sql-e2e-100w-sgx.log
 fi
 status_5_k8s_spark_sql_e2e=$(echo $?)
diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-e2e-example.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-e2e-example.sh
index a2a0ab8ab45..0fcf41859c6 100644
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-e2e-example.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-e2e-example.sh
@@ -1,12 +1,14 @@
 #!/bin/bash
 status_8_local_spark_customer_profile=1
-SPARK_LOCAL_IP=192.168.0.112
+SPARK_LOCAL_IP=$LOCAL_IP
 DB_PATH=/ppml/trusted-big-data-ml/work/data/sqlite_example/100w.db
 # attention to SPARK_LOCAL_IP env change into targeted ip

 if [ $status_8_local_spark_customer_profile -ne 0 ]; then
 echo "example.8 local spark, Custom profile"
-SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && export SPARK_LOCAL_IP=$SPARK_LOCAL_IP && /opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && export SPARK_LOCAL_IP=$SPARK_LOCAL_IP && /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
@@ -17,7 +19,9 @@ SGX=1 ./pal_loader bash -c "export TF_MKL_ALLOC_MAX_BYTES=10737418240 && export
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
 /ppml/trusted-big-data-ml/work/examples/customer_profile.py \
- --db_path $DB_PATH" 2>&1 > customer_profile-sgx.log
+ --db_path $DB_PATH" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee customer_profile-sgx.log
 status_8_local_spark_customer_profile=$(echo $?)

 fi
diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-simple-examples.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-simple-examples.sh
index a3136e6f6e5..bcbb1c69821 100644
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-simple-examples.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-simple-examples.sh
@@ -4,13 +4,17 @@ status_4_local_spark_wordcount=1

 if [ $status_3_local_spark_pi -ne 0 ]; then
 echo "example.3 local spark, pi"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/pi.py" 2>&1 > test-pi-sgx.log && \
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/pi.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-pi-sgx.log && \
 cat test-pi-sgx.log | egrep 'roughly'
 status_3_local_spark_pi=$(echo $?)
 fi
@@ -18,14 +22,18 @@ fi

 if [ $status_4_local_spark_wordcount -ne 0 ]; then
 echo "example.4 local spark, test-wordcount"
-SGX=1 ./pal_loader bash -c "export PYSPARK_PYTHON=/usr/bin/python && /opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export PYSPARK_PYTHON=/usr/bin/python && /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
 /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/wordcount.py \
- /ppml/trusted-big-data-ml/work/examples/helloworld.py" 2>&1 > test-wordcount-sgx.log && \
+ /ppml/trusted-big-data-ml/work/examples/helloworld.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-wordcount-sgx.log && \
 cat test-wordcount-sgx.log | egrep 'print'
 status_4_local_spark_wordcount=$(echo $?)
 fi
diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-sql-api-examples.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-sql-api-examples.sh
index 1186e810262..6b9339f3fba 100644
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-sql-api-examples.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-sql-api-examples.sh
@@ -18,7 +18,9 @@ cd /ppml/trusted-big-data-ml

 if [ $status_1_pyspark_sql_api_DataFrame -ne 0 ]; then
 echo "pysaprk sql api example.1 --- DataFrame"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
@@ -26,13 +28,17 @@ SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
 --master 'local[4]' \
 --conf spark.sql.broadcastTimeout=3000 \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_example.py" 2>&1 > test-sql-dataframe-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-sql-dataframe-sgx.log
 status_1_pyspark_sql_api_DataFrame=$(echo $?)
 fi

 if [ $status_2_pyspark_sql_api_SQLContext -ne 0 ]; then
 echo "pysaprk sql api example.2 --- SQLContext"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
@@ -40,137 +46,179 @@ SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
 --conf spark.sql.broadcastTimeout=3000 \
- /ppml/trusted-big-data-ml/work/examples/sql_context_example.py" 2>&1 > test-sql-context-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_context_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-sql-context-sgx.log
 status_2_pyspark_sql_api_SQLContext=$(echo $?)
 fi

 if [ $status_3_pyspark_sql_api_UDFRegistration -ne 0 ]; then
 echo "pysaprk sql api example.3 --- UDFRegistration"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/spark-sql_2.12-3.1.2.jar' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_UDFRegistration_example.py" 2>&1 > test-sql-UDFRegistration.log
+ /ppml/trusted-big-data-ml/work/examples/sql_UDFRegistration_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-sql-UDFRegistration.log
 status_3_pyspark_sql_api_UDFRegistration=$(echo $?)
 fi

 if [ $status_4_pyspark_sql_api_GroupedData -ne 0 ]; then
 echo "pysaprk sql api example.4 --- GroupedData"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_groupeddata_example.py" 2>&1 > test-sql-groupeddata-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_groupeddata_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-sql-groupeddata-sgx.log
 status_4_pyspark_sql_api_GroupedData=$(echo $?)
 fi

 if [ $status_5_pyspark_sql_api_Column -ne 0 ]; then
 echo "pysaprk sql api example.5 --- Column"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_column_example.py" 2>&1 > test-sql-column-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_column_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-sql-column-sgx.log
 status_5_pyspark_sql_api_Column=$(echo $?)
 fi

 if [ $status_6_pyspark_sql_api_Row_and_DataFrameNaFunctions -ne 0 ]; then
 echo "pysaprk sql api example.6 --- Row_and_DataFrameNaFunctions"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_row_func_example.py" 2>&1 > test-sql-row-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_row_func_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-sql-row-sgx.log
 status_6_pyspark_sql_api_Row_and_DataFrameNaFunctions=$(echo $?)
 fi

 if [ $status_7_pyspark_sql_api_Window -ne 0 ]; then
 echo "pysaprk sql api example.7 --- Window"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_window_example.py" 2>&1 > test-window-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_window_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-window-sgx.log
 status_7_pyspark_sql_api_Window=$(echo $?)
 fi

 if [ $status_8_pyspark_sql_api_DataframeReader -ne 0 ]; then
 echo "pysaprk sql api example.8 --- DataframeReader"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_dataframe_reader_example.py" 2>&1 > test-dataframe-reader-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_dataframe_reader_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-dataframe-reader-sgx.log
 status_8_pyspark_sql_api_DataframeReader=$(echo $?)
 fi

 if [ $status_9_pyspark_sql_api_DataframeWriter -ne 0 ]; then
 echo "pysaprk sql api example.9 --- DataframeWriter"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_dataframe_writer_example.py" 2>&1 > test-dataframe-writer-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_dataframe_writer_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-dataframe-writer-sgx.log
 status_9_pyspark_sql_api_DataframeWriter=$(echo $?)
 fi

 if [ $status_10_pyspark_sql_api_HiveContext -ne 0 ]; then
 echo "pysaprk sql api example.10 --- HiveContext"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_hive_context_example.py" 2>&1 > sql_hive_context_example-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_hive_context_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee sql_hive_context_example-sgx.log
 status_10_pyspark_sql_api_HiveContext=$(echo $?)
 fi

 if [ $status_11_pyspark_sql_api_Catalog -ne 0 ]; then
 echo "pysaprk sql api example.11 --- Catalog"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_catalog_example.py" 2>&1 > sql_catalog_example-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_catalog_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee sql_catalog_example-sgx.log
 status_11_pyspark_sql_api_Catalog=$(echo $?)
 fi

 if [ $status_12_pyspark_sql_types_module -ne 0 ]; then
 echo "pysaprk sql api example.12 --- types module"
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_types_example.py" 2>&1 > sql_types_example-sgx.log
+ /ppml/trusted-big-data-ml/work/examples/sql_types_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee sql_types_example-sgx.log
 status_12_pyspark_sql_types_module=$(echo $?)
 fi

@@ -178,14 +226,18 @@ if [ $status_13_pyspark_sql_functions_module -ne 0 ]; then
 echo "pysaprk sql api example.13 --- pyspark api functions"
 for f_num in {a..g}
 do
- SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+ cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/examples/sql_functions_${f_num}_example.py" 2>&1 > sql_functions_${f_num}_example.log
+ /ppml/trusted-big-data-ml/work/examples/sql_functions_${f_num}_example.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee sql_functions_${f_num}_example.log
 done
 fi

diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-sql-examples.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-sql-examples.sh
index 16267729201..075b8985cb5 100644
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-sql-examples.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/pyspark-sql-examples.sh
@@ -8,21 +8,27 @@ cd /ppml/trusted-big-data-ml

 if [ $status_5_local_spark_basic_sql -ne 0 ]; then
 echo "example.5 local spark, Basic SQL"
-SGX=1 ./pal_loader bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx1g org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --conf spark.python.use.daemon=false \
 --conf spark.python.worker.reuse=false \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/sql/basic.py" 2>&1 > test-sql-basic-sgx.log && \
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/sql/basic.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-sql-basic-sgx.log && \
 cat test-sql-basic-sgx.log | egrep '\+\-|Name:' -A10
 status_5_local_spark_basic_sql=$(echo $?)
 fi

 if [ $status_6_local_spark_arrow -ne 0 ]; then
 echo "example.6 local spark, Arrow"
-SGX=1 ./pal_loader bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
 export ARROW_PRE_0_15_IPC_FORMAT=0 && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
@@ -33,13 +39,17 @@ SGX=1 ./pal_loader bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
 --conf spark.sql.execution.arrow.enabled=true \
 --conf spark.driver.memory=2g \
 --executor-memory 2g \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/sql/arrow.py" 2>&1 > test-sql-arrow-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/sql/arrow.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-sql-arrow-sgx.log
 status_6_local_spark_arrow=$(echo $?)
 fi

 if [ $status_7_local_spark_hive -ne 0 ]; then
 echo "example.7 local spark, Hive"
-SGX=1 ./pal_loader bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx2g org.apache.spark.deploy.SparkSubmit \
@@ -49,7 +59,9 @@ SGX=1 ./pal_loader bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
 --conf spark.driver.memory=2g \
 --conf spark.sql.broadcastTimeout=30000 \
 --executor-memory 2g \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/sql/hive.py" 2>&1 > test-sql-hive-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/src/main/python/sql/hive.py" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-sql-hive-sgx.log
 status_7_local_spark_hive=$(echo $?)
 fi
diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-e2e-example.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-e2e-example.sh
index b341043a163..1c911661d20 100644
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-e2e-example.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-e2e-example.sh
@@ -1,10 +1,12 @@
 status_8_scala_e2e=1
-LOCAL_IP=192.168.0.112
+LOCAL_IP=$LOCAL_IP
 DB_PATH=$1

 if [ $status_8_scala_e2e -ne 0 ]; then
- SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+ cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*:/ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-example-sql-e2e.jar' \
 -Xmx2g \
 org.apache.spark.deploy.SparkSubmit \
@@ -14,7 +16,9 @@ if [ $status_8_scala_e2e -ne 0 ]; then
 --executor-memory 8g \
 --class test.SqlExample \
 /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-example-sql-e2e.jar \
- $DB_PATH" 2>&1 > spark-example-sql-e2e-sgx.log
+ $DB_PATH" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee spark-example-sql-e2e-sgx.log
 fi
 status_8_scala_e2e=$(echo $?)
diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-examples.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-examples.sh
index fef0e3b739c..0594a5ea965 100644
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-examples.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-examples.sh
@@ -3,14 +3,18 @@ cd /ppml/trusted-big-data-ml
 status_1_scala_spark_pi=1

 if [ $status_1_scala_spark_pi -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx10g \
 -XX:ActiveProcessorCount=24 \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --class org.apache.spark.examples.SparkPi \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" 2>&1 > test-scala-spark-pi-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-scala-spark-pi-sgx.log
 fi
 status_1_scala_spark_pi=$(echo $?)
diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-sql-examples.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-sql-examples.sh
index bf0733ec871..070aeb91e10 100644
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-sql-examples.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/spark-sql-examples.sh
@@ -9,22 +9,28 @@ status_5_scala_sql_UserDefinedScalar=1
 status_6_scala_sql_UserDefinedTypedAggregation=1
 status_7_scala_sql_UserDefinedUntypedAggregation=1
 status_8_scala_sql_SparkHiveExample=1
-LOCAL_IP=192.168.0.112
+LOCAL_IP=$LOCAL_IP

 if [ $status_2_scala_sql_example -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "/opt/jdk8/bin/java \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "/opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx10g \
 -XX:ActiveProcessorCount=24 \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --class org.apache.spark.examples.sql.SparkSQLExample \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" 2>&1 > test-scala-spark-sql-example-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-scala-spark-sql-example-sgx.log
 fi
 status_2_scala_sql_example=$(echo $?)

 if [ $status_3_scala_sql_RDDRelation -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "rm -rf pair.parquet && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "rm -rf pair.parquet && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx10g \
@@ -32,12 +38,16 @@ SGX=1 ./pal_loader bash -c "rm -rf pair.parquet && \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --class org.apache.spark.examples.sql.RDDRelation \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" 2>&1 > test-scala-spark-sql-RDDRelation-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-scala-spark-sql-RDDRelation-sgx.log
 fi
 status_3_scala_sql_RDDRelation=$(echo $?)

 if [ $status_4_scala_sql_SimpleTypedAggregator -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "rm -rf pair.parquet && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "rm -rf pair.parquet && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx10g \
@@ -45,12 +55,16 @@ SGX=1 ./pal_loader bash -c "rm -rf pair.parquet && \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --class org.apache.spark.examples.sql.SimpleTypedAggregator \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" 2>&1 > test-scala-spark-sql-SimpleTypedAggregator-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-scala-spark-sql-SimpleTypedAggregator-sgx.log
 fi
 status_4_scala_sql_SimpleTypedAggregator=$(echo $?)

 if [ $status_5_scala_sql_UserDefinedScalar -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "rm -rf pair.parquet && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "rm -rf pair.parquet && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx10g \
@@ -58,12 +72,16 @@ SGX=1 ./pal_loader bash -c "rm -rf pair.parquet && \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --class org.apache.spark.examples.sql.UserDefinedScalar \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" 2>&1 > test-scala-spark-sql-UserDefinedScalar-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-scala-spark-sql-UserDefinedScalar-sgx.log
 fi
 status_5_scala_sql_UserDefinedScalar=$(echo $?)

 if [ $status_6_scala_sql_UserDefinedTypedAggregation -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "rm -rf pair.parquet && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "rm -rf pair.parquet && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx10g \
@@ -71,12 +89,16 @@ SGX=1 ./pal_loader bash -c "rm -rf pair.parquet && \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --class org.apache.spark.examples.sql.UserDefinedTypedAggregation \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" 2>&1 > test-scala-spark-sql-UserDefinedTypedAggregation-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-scala-spark-sql-UserDefinedTypedAggregation-sgx.log
 fi
 status_6_scala_sql_UserDefinedTypedAggregation=$(echo $?)

 if [ $status_7_scala_sql_UserDefinedUntypedAggregation -ne 0 ]; then
-SGX=1 ./pal_loader bash -c "rm -rf pair.parquet && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "rm -rf pair.parquet && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx10g \
@@ -84,14 +106,18 @@ SGX=1 ./pal_loader bash -c "rm -rf pair.parquet && \
 org.apache.spark.deploy.SparkSubmit \
 --master 'local[4]' \
 --class org.apache.spark.examples.sql.UserDefinedUntypedAggregation \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" 2>&1 > test-scala-spark-sql-UserDefinedUntypedAggregation-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-scala-spark-sql-UserDefinedUntypedAggregation-sgx.log
 fi
 status_7_scala_sql_UserDefinedUntypedAggregation=$(echo $?)

 if [ $status_8_scala_sql_SparkHiveExample -ne 0 ]; then
 rm -rf metastore_db && \
 rm -rf /tmp/parquet_data && \
-SGX=1 ./pal_loader bash -c "
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "
 rm -rf pair.parquet && \
 /opt/jdk8/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
@@ -113,7 +139,9 @@ SGX=1 ./pal_loader bash -c "
 --executor-cores 4 \
 --total-executor-cores 4 \
 --executor-memory 10G \
- /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar hdfs://$LOCAL_IP:9000/spark-warehouse" 2>&1 > test-scala-spark-sql-SparkHiveExample-sgx.log
+ /ppml/trusted-big-data-ml/work/spark-3.1.2/examples/jars/spark-examples_2.12-3.1.2.jar hdfs://$LOCAL_IP:9000/spark-warehouse" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee test-scala-spark-sql-SparkHiveExample-sgx.log
 fi
 status_8_scala_sql_SparkHiveExample=$(echo $?)

diff --git a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/start-tpch.sh b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/start-tpch.sh
index f1c07c4b056..b386fcb0ade 100644
--- a/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/start-tpch.sh
+++ b/ppml/trusted-big-data-ml/python/docker-graphene/test-suites/start-tpch.sh
@@ -2,7 +2,7 @@
 BASE_DIR=/ppml/trusted-big-data-ml
 WORK_DIR=$BASE_DIR/work
 TPCH_SPARK_DIR=$WORK_DIR/zoo-tutorials/tpch-spark
-LOCAL_IP=192.168.0.112
+LOCAL_IP=$LOCAL_IP

 cd $WORK_DIR

@@ -30,7 +30,9 @@ java $SBT_OPTS -jar /usr/local/sbt/bin/sbt-launch.jar package

 # run tpch
 cd $BASE_DIR
-SGX=1 ./pal_loader bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
+cd /ppml/trusted-big-data-ml
+./clean.sh
+/graphene/Tools/argv_serializer bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
 ${JAVA_HOME}/bin/java \
 -cp '/ppml/trusted-big-data-ml/work/zoo-tutorials/tpch-spark/target/scala-2.12/spark-tpc-h-queries_2.12-1.0.jar:/ppml/trusted-big-data-ml/work/zoo-tutorials/tpch-spark/dbgen/*:/ppml/trusted-big-data-ml/work/spark-3.1.2/conf/:/ppml/trusted-big-data-ml/work/spark-3.1.2/jars/*' \
 -Xmx10g \
@@ -54,4 +56,6 @@ SGX=1 ./pal_loader bash -c "export PYSPARK_PYTHON=/usr/bin/python && \
 --driver-memory 10G \
 /ppml/trusted-big-data-ml/work/zoo-tutorials/tpch-spark/target/scala-2.12/spark-tpc-h-queries_2.12-1.0.jar \
 /ppml/trusted-big-data-ml/work/zoo-tutorials/tpch-spark/dbgen \
- hdfs://$LOCAL_IP:9000/dbgen-query" 2>&1 | tee spark.local.tpc.h.sgx.log
+ hdfs://$LOCAL_IP:9000/dbgen-query" > /ppml/trusted-big-data-ml/secured-argvs
+./init.sh
+SGX=1 ./pal_loader bash 2>&1 | tee spark.local.tpc.h.sgx.log
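
Note: every hunk above applies the same mechanical rewrite, so the patch is easiest to review as one template. The old form, SGX=1 ./pal_loader bash -c "<command>" 2>&1 > <name>.log, handed the whole command line to the enclave inline; with that redirection order stderr also bypassed the log, since 2>&1 duplicates stderr onto the terminal's stdout before > reroutes stdout to the file. The new form serializes the command line into the secured-argvs file first and then launches the enclave with no inline arguments. A minimal sketch of the new flow follows, assuming the image layout used above (clean.sh, init.sh, pal_loader, and /graphene/Tools/argv_serializer available under /ppml/trusted-big-data-ml); <command> and <name> are illustrative placeholders, not names from the patch:

#!/bin/bash
# Template distilled from the hunks above; not tied to any single test case.
cd /ppml/trusted-big-data-ml
./clean.sh    # clear state left behind by the previous Graphene run

# Serialize "bash -c <command>" into the secured-argvs file, which the
# enclave is presumably configured (via its manifest) to read at startup,
# instead of trusting an argv passed in from the untrusted host.
/graphene/Tools/argv_serializer bash -c "<command>" > /ppml/trusted-big-data-ml/secured-argvs

./init.sh     # re-run the image's init step before launching the enclave
SGX=1 ./pal_loader bash 2>&1 | tee <name>-sgx.log    # run in SGX; log stdout and stderr

One behavioral note for reviewers: where a status_*=$(echo $?) assignment directly follows the new pipeline, $? now records the exit status of the last command in that pipeline (tee, or the chained egrep check where one exists), not of pal_loader itself.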