From 425d718234c9baf212d2b06f244e1c2eb0143974 Mon Sep 17 00:00:00 2001
From: nhynes
Date: Thu, 8 Mar 2018 17:03:01 -0800
Subject: [PATCH] Add end-to-end SGX ResNet inference example (#388)

---
 nnvm/examples/sgx/.gitignore          |   2 +
 nnvm/examples/sgx/Makefile            | 102 +++++++++++++++++++
 nnvm/examples/sgx/README.md           |  39 ++++++++
 nnvm/examples/sgx/app.cc              | 135 ++++++++++++++++++++++++++
 nnvm/examples/sgx/build_model.py      |  74 ++++++++++++++
 nnvm/examples/sgx/enclave.cc          |  98 +++++++++++++++++++
 nnvm/examples/sgx/enclave_config.xml  |  11 +++
 nnvm/examples/sgx/enclave_private.pem |  39 ++++++++
 nnvm/examples/sgx/model.edl           |   7 ++
 nnvm/examples/sgx/run_example.sh      |   7 ++
 10 files changed, 514 insertions(+)
 create mode 100644 nnvm/examples/sgx/.gitignore
 create mode 100644 nnvm/examples/sgx/Makefile
 create mode 100644 nnvm/examples/sgx/README.md
 create mode 100644 nnvm/examples/sgx/app.cc
 create mode 100644 nnvm/examples/sgx/build_model.py
 create mode 100644 nnvm/examples/sgx/enclave.cc
 create mode 100644 nnvm/examples/sgx/enclave_config.xml
 create mode 100644 nnvm/examples/sgx/enclave_private.pem
 create mode 100644 nnvm/examples/sgx/model.edl
 create mode 100755 nnvm/examples/sgx/run_example.sh

diff --git a/nnvm/examples/sgx/.gitignore b/nnvm/examples/sgx/.gitignore
new file mode 100644
index 0000000000000..25f8f1058cab8
--- /dev/null
+++ b/nnvm/examples/sgx/.gitignore
@@ -0,0 +1,2 @@
+lib/
+bin/
diff --git a/nnvm/examples/sgx/Makefile b/nnvm/examples/sgx/Makefile
new file mode 100644
index 0000000000000..490de0aa5ba29
--- /dev/null
+++ b/nnvm/examples/sgx/Makefile
@@ -0,0 +1,102 @@
+# Makefile for example to deploy TVM modules in SGX.
+
+PYTHON ?= python
+
+NNVM_ROOT := $(shell cd ../../; pwd)
+TVM_ROOT := $(NNVM_ROOT)/tvm
+DMLC_CORE_ROOT := $(NNVM_ROOT)/dmlc-core
+
+SGX_SDK ?= /opt/sgxsdk
+SGX_MODE ?= SIM
+SGX_ARCH ?= x64
+SGX_DEBUG ?= 1
+
+sgx_edger8r := $(SGX_SDK)/bin/x64/sgx_edger8r
+sgx_enclave_signer := $(SGX_SDK)/bin/x64/sgx_sign
+
+ifneq ($(SGX_MODE), HW)
+	sgx_sim := _sim
+endif
+urts_library_name := sgx_urts$(sgx_sim)
+trts_library_name := sgx_trts$(sgx_sim)
+tservice_library_name := sgx_tservice$(sgx_sim)
+uservice_library_name := sgx_uae_service$(sgx_sim)
+
+pkg_cflags := -std=c++11 -O2 -fPIC\
+	-I$(NNVM_ROOT)/include\
+	-I$(NNVM_ROOT)\
+	-I$(TVM_ROOT)/include\
+	-I$(TVM_ROOT)/dlpack/include\
+	-I$(DMLC_CORE_ROOT)/include\
+	-DDMLC_LOG_STACK_TRACE=0\
+
+pkg_ldflags := -L$(TVM_ROOT)/lib
+
+enclave_include_paths := -I$(SGX_SDK)/include\
+	-I$(SGX_SDK)/include/tlibc\
+	-I$(SGX_SDK)/include/libcxx\
+	-I$(SGX_SDK)/include/stdc++\
+
+enclave_cflags := -static -nostdinc\
+	-fvisibility=hidden -fpie -fstack-protector-strong\
+	-ffunction-sections -fdata-sections\
+	-DDMLC_CXX11_THREAD_LOCAL=0\
+	$(enclave_include_paths)\
+
+enclave_cxxflags := -nostdinc++ $(enclave_cflags)
+
+enclave_ldflags :=\
+	-Wl,--no-undefined -nostdlib -nodefaultlibs -nostartfiles -L$(SGX_SDK)/lib64\
+	-Wl,--whole-archive -l$(trts_library_name) -Wl,--no-whole-archive\
+	-Wl,--start-group\
+	-lsgx_tstdc -lsgx_tstdcxx -lsgx_tcxx -lsgx_tcrypto -lsgx_tkey_exchange -l$(tservice_library_name)\
+	-Wl,--end-group\
+	-Wl,-Bstatic -Wl,-Bsymbolic -Wl,--no-undefined\
+	-Wl,-pie,-eenclave_entry -Wl,--export-dynamic\
+	-Wl,--defsym,__ImageBase=0 -Wl,--gc-sections
+
+app_cflags := -I$(SGX_SDK)/include -Ilib
+
+app_ldflags := -L$(SGX_SDK)/lib64\
+	-l$(urts_library_name) -l$(uservice_library_name) -lpthread\
+
+.PHONY: clean all
+
+all: lib/model.signed.so bin/run_model
+
+# The code library built by TVM
+lib/deploy_%.o: build_model.py
+	@mkdir -p $(@D)
+	$(PYTHON) build_model.py
+
+# EDL files
+lib/model_%.c: model.edl $(sgx_edger8r)
+	@mkdir -p $(@D)
+	$(sgx_edger8r) $< --trusted-dir $(@D) --untrusted-dir $(@D) --search-path $(SGX_SDK)/include
+
+lib/model_%.o: lib/model_%.c
+	$(CC) $(enclave_cflags) -c $< -o $@
+
+# The enclave library
+lib/model.so: enclave.cc $(TVM_ROOT)/sgx/sgx_runtime.cc lib/model_t.o lib/deploy_lib.o
+	$(CXX) $^ -o $@ $(pkg_cflags) $(pkg_ldflags) $(enclave_cxxflags) $(enclave_ldflags)\
+	-Wl,--format=binary -Wl,lib/deploy_graph.json -Wl,lib/deploy_params.bin -Wl,--format=default
+
+# The signed enclave
+lib/model.signed.so: lib/model.so enclave_config.xml
+	$(sgx_enclave_signer) sign -key enclave_private.pem -enclave $< -out $@ -config enclave_config.xml
+
+# An app that runs the enclave
+bin/run_model: app.cc lib/model_u.o
+	@mkdir -p $(@D)
+	$(CXX) $^ -o $@ $(app_cflags) $(app_ldflags)
+
+# Debugging binary that runs TVM without SGX
+bin/run_model_nosgx: enclave.cc $(TVM_ROOT)/sgx/sgx_runtime.cc lib/deploy_lib.o
+	@mkdir -p $(@D)
+	$(CXX) $^ -o $@ $(pkg_cflags) $(pkg_ldflags)\
+	-Wl,--format=binary -Wl,lib/deploy_graph.json -Wl,lib/deploy_params.bin -Wl,--format=default
+
+
+clean:
+	rm -rf lib bin
diff --git a/nnvm/examples/sgx/README.md b/nnvm/examples/sgx/README.md
new file mode 100644
index 0000000000000..97ec1fcfca578
--- /dev/null
+++ b/nnvm/examples/sgx/README.md
@@ -0,0 +1,39 @@
+# TVM in Intel SGX Example
+
+This application demonstrates running ResNet18 inference with NNVM inside an
+[Intel SGX](https://software.intel.com/en-us/blogs/2013/09/26/protecting-application-secrets-with-intel-sgx) trusted computing environment.
+
+## Prerequisites
+
+1. A GNU/Linux environment
+2. NNVM, TVM compiled with LLVM, and their corresponding Python modules
+3. The [Linux SGX SDK](https://github.com/intel/linux-sgx) ([pre-built libraries](https://01.org/intel-software-guard-extensions/downloads))
+4. `pip install --user mxnet pillow`
+
+## Running the example
+
+`SGX_SDK=/path/to/sgxsdk bash run_example.sh`
+
+If everything goes well, you should see a lot of build messages and, below them,
+the text `It's a tabby!`.
+
+## High-level overview
+
+First of all, it helps to think of an SGX enclave as a library that can be called
+to perform trusted computation.
+Within this library, one can use other libraries like TVM.
+
+Building this example performs the following steps:
+
+1. Downloads a pre-trained MXNet ResNet18 and a
+   [test image](https://github.com/BVLC/caffe/blob/master/examples/images/cat.jpg)
+2. Converts the ResNet to an NNVM graph + library
+3. Links the graph JSON definition, params, and runtime library into an SGX
+   enclave along with some code that performs inference.
+4. Compiles and runs an executable that loads the enclave and asks it to run
+   inference on the image, which in turn invokes the TVM module (sketched below).
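+
+The untrusted-to-trusted hop in step 4 is an ECALL through the proxy that
+`sgx_edger8r` generates from `model.edl`. The following is a condensed sketch of
+what `app.cc` does; error reporting and launch-token caching are omitted:
+
+```cpp
+#include <fstream>
+#include <sstream>
+#include <string>
+
+#include "sgx_urts.h"
+#include "model_u.h"  // untrusted proxy generated from model.edl
+
+int main() {
+  sgx_enclave_id_t eid = 0;
+  sgx_launch_token_t token = {0};
+  int token_updated = 0;
+  // Load and initialize the signed enclave image (1 = debug enclave).
+  if (sgx_create_enclave("lib/model.signed.so", 1, &token,
+                         &token_updated, &eid, NULL) != SGX_SUCCESS) {
+    return 1;
+  }
+
+  // The preprocessed 1x3x224x224 float32 image written by build_model.py.
+  std::ifstream f_img("bin/cat.bin", std::ios::binary);
+  std::stringstream ss;
+  ss << f_img.rdbuf();
+  std::string img = ss.str();
+
+  unsigned predicted_class = 0;
+  sgx_status_t status = ecall_infer(eid, &predicted_class, img.c_str());
+  sgx_destroy_enclave(eid);
+  return (status == SGX_SUCCESS && predicted_class == 281) ? 0 : 1;
+}
+```
+
+Class 281 is ImageNet's "tabby cat", which is why the example prints
+`It's a tabby!` on success.
+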
+For more information on building, please refer to the `Makefile`.
+For more information on the TVM module, please refer to `../howto_deploy`.
+For more information on SGX enclaves, please refer to the [SGX Enclave Demo](https://github.com/intel/linux-sgx/tree/master/SampleCode/SampleEnclave/).
diff --git a/nnvm/examples/sgx/app.cc b/nnvm/examples/sgx/app.cc
new file mode 100644
index 0000000000000..001c0d71d69e3
--- /dev/null
+++ b/nnvm/examples/sgx/app.cc
@@ -0,0 +1,135 @@
+#include <cstdio>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#include "sgx_urts.h"
+#include "sgx_eid.h"
+#include "model_u.h"
+
+#define TOKEN_FILENAME "bin/enclave.token"
+#define ENCLAVE_FILENAME "lib/model.signed.so"
+
+sgx_enclave_id_t global_eid = 0;  // global EID shared by multiple threads
+
+typedef struct _sgx_errlist_t {
+  sgx_status_t err;
+  const char *msg;
+} sgx_errlist_t;
+
+/* Error code returned by sgx_create_enclave */
+static sgx_errlist_t sgx_errlist[] = {
+  { SGX_ERROR_DEVICE_BUSY, "SGX device was busy." },
+  { SGX_ERROR_ENCLAVE_FILE_ACCESS, "Can't open enclave file." },
+  { SGX_ERROR_ENCLAVE_LOST, "Power transition occurred." },
+  { SGX_ERROR_INVALID_ATTRIBUTE, "Enclave was not authorized." },
+  { SGX_ERROR_INVALID_ENCLAVE, "Invalid enclave image." },
+  { SGX_ERROR_INVALID_ENCLAVE_ID, "Invalid enclave identification." },
+  { SGX_ERROR_INVALID_METADATA, "Invalid enclave metadata." },
+  { SGX_ERROR_INVALID_PARAMETER, "Invalid parameter." },
+  { SGX_ERROR_INVALID_SIGNATURE, "Invalid enclave signature." },
+  { SGX_ERROR_INVALID_VERSION, "Enclave version was invalid." },
+  { SGX_ERROR_MEMORY_MAP_CONFLICT, "Memory map conflicted." },
+  { SGX_ERROR_NO_DEVICE, "Invalid SGX device." },
+  { SGX_ERROR_OUT_OF_EPC, "Out of EPC memory." },
+  { SGX_ERROR_OUT_OF_MEMORY, "Out of memory." },
+  { SGX_ERROR_UNEXPECTED, "Unexpected error occurred." },
+};
+
+/* Check error conditions for loading enclave */
+void print_error_message(sgx_status_t status)
+{
+  size_t idx = 0;
+  size_t ttl = sizeof sgx_errlist / sizeof sgx_errlist[0];
+
+  for (idx = 0; idx < ttl; idx++) {
+    if (status == sgx_errlist[idx].err) {
+      printf("Error: %s\n", sgx_errlist[idx].msg);
+      break;
+    }
+  }
+
+  if (idx == ttl)
+    printf("Error code is 0x%X. Please refer to the \"Intel SGX SDK Developer Reference\" for more details.\n", status);
+}
+
+/* Initialize the enclave:
+ *   Step 1: try to retrieve the launch token saved by last transaction
+ *   Step 2: call sgx_create_enclave to initialize an enclave instance
+ *   Step 3: save the launch token if it is updated
+ */
+int initialize_enclave(void)
+{
+  sgx_launch_token_t token = {0};
+  sgx_status_t sgx_status = SGX_ERROR_UNEXPECTED;
+  int updated = 0;
+
+  /* Step 1: try to retrieve the launch token saved by last transaction
+   *         if there is no token, then create a new one.
+   */
+  FILE *fp = fopen(TOKEN_FILENAME, "rb");
+  if (fp == NULL && (fp = fopen(TOKEN_FILENAME, "wb")) == NULL) {
+    printf("Warning: Failed to create/open the launch token file \"%s\".\n", TOKEN_FILENAME);
+    return -1;
+  }
+
+  /* read the token from saved file */
+  size_t read_num = fread(token, 1, sizeof(sgx_launch_token_t), fp);
+  if (read_num != 0 && read_num != sizeof(sgx_launch_token_t)) {
+    /* if token is invalid, clear the buffer */
+    memset(&token, 0x0, sizeof(sgx_launch_token_t));
+    printf("Warning: Invalid launch token read from \"%s\".\n", TOKEN_FILENAME);
+  }
+
+  /* Step 2: call sgx_create_enclave to initialize an enclave instance */
+  /* Debug Support: set 2nd parameter to 1 */
+  sgx_status = sgx_create_enclave(ENCLAVE_FILENAME, SGX_DEBUG_FLAG, &token, &updated, &global_eid, NULL);
+  if (sgx_status != SGX_SUCCESS) {
+    print_error_message(sgx_status);
+    if (fp != NULL) fclose(fp);
+    return -1;
+  }
+
+  /* Step 3: save the launch token if it is updated */
+  if (updated == 0 || fp == NULL) {
+    /* if the token is not updated, or file handler is invalid, do not perform saving */
+    if (fp != NULL) fclose(fp);
+    return 0;
+  }
+
+  /* reopen the file with write capability */
+  fp = freopen(TOKEN_FILENAME, "wb", fp);
+  if (fp == NULL) return 0;
+  size_t write_num = fwrite(token, 1, sizeof(sgx_launch_token_t), fp);
+  if (write_num != sizeof(sgx_launch_token_t))
+    printf("Warning: Failed to save launch token to \"%s\".\n", TOKEN_FILENAME);
+  fclose(fp);
+  return 0;
+}
+
+int SGX_CDECL main(int argc, char *argv[]) {
+  if (initialize_enclave() < 0) {
+    printf("Failed to initialize enclave.\n");
+    return -1;
+  }
+
+  std::ifstream f_img("bin/cat.bin", std::ios::binary);
+  std::string img(static_cast<std::stringstream&>(
+      std::stringstream() << f_img.rdbuf()).str());
+
+  unsigned predicted_class = 0;
+  sgx_status_t sgx_status = SGX_ERROR_UNEXPECTED;
+  sgx_status = ecall_infer(global_eid, &predicted_class, img.c_str());
+  if (sgx_status != SGX_SUCCESS) {
+    print_error_message(sgx_status);
+    sgx_destroy_enclave(global_eid);
+    return 1;
+  }
+
+  sgx_destroy_enclave(global_eid);
+  if (predicted_class == 281) {
+    std::cout << "It's a tabby!" << std::endl;
+    return 0;
+  }
+  std::cerr << "Inference failed! Predicted class: " <<
+      predicted_class << std::endl;
+  return 1;
+}
diff --git a/nnvm/examples/sgx/build_model.py b/nnvm/examples/sgx/build_model.py
new file mode 100644
index 0000000000000..df3220afa8409
--- /dev/null
+++ b/nnvm/examples/sgx/build_model.py
@@ -0,0 +1,74 @@
+"""Creates a neural network graph module, the system library, and params.
+Heavily inspired by tutorials/from_mxnet.py
+"""
+from __future__ import print_function
+import ast
+import os
+from os import path as osp
+import tempfile
+
+import mxnet as mx
+from mxnet.gluon.model_zoo.vision import get_model
+from mxnet.gluon.utils import download
+import nnvm
+import nnvm.compiler
+import numpy as np
+from PIL import Image
+import tvm
+
+
+EXAMPLE_ROOT = osp.abspath(osp.join(osp.dirname(__file__)))
+BIN_DIR = osp.join(EXAMPLE_ROOT, 'bin')
+LIB_DIR = osp.join(EXAMPLE_ROOT, 'lib')
+
+TVM_TARGET = 'llvm --system-lib'
+
+
+def _download_model_and_image(out_dir):
+    mx_model = get_model('resnet18_v1', pretrained=True)
+
+    img_path = osp.join(out_dir, 'cat.png')
+    bin_img_path = osp.join(out_dir, 'cat.bin')
+    download(
+        'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true',
+        img_path)
+    img = Image.open(img_path).resize((224, 224))
+    img = _transform_image(img)
+    img.astype('float32').tofile(bin_img_path)
+    shape_dict = {'data': img.shape}
+
+    return mx_model, shape_dict
+
+
+def _transform_image(image):
+    image = np.array(image) - np.array([123., 117., 104.])
+    image /= np.array([58.395, 57.12, 57.375])
+    image = image.transpose((2, 0, 1))
+    image = image[np.newaxis, :]
+    return image
+
+
+def main():
+    # load the model, input image, and imagenet classes
+    mx_model, shape_dict = _download_model_and_image(BIN_DIR)
+
+    # convert the model, add a softmax
+    sym, params = nnvm.frontend.from_mxnet(mx_model)
+    sym = nnvm.sym.softmax(sym)
+
+    # build the graph
+    graph, lib, params = nnvm.compiler.build(
+        sym, TVM_TARGET, shape_dict, params=params)
+
+    # save the built graph
+    if not osp.isdir(LIB_DIR):
+        os.mkdir(LIB_DIR)
+    lib.save(osp.join(LIB_DIR, 'deploy_lib.o'))
+    with open(osp.join(LIB_DIR, 'deploy_graph.json'), 'w') as f_graph_json:
+        f_graph_json.write(graph.json())
+    with open(osp.join(LIB_DIR, 'deploy_params.bin'), 'wb') as f_params:
+        f_params.write(nnvm.compiler.save_param_dict(params))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/nnvm/examples/sgx/enclave.cc b/nnvm/examples/sgx/enclave.cc
new file mode 100644
index 0000000000000..f1210c91e046d
--- /dev/null
+++ b/nnvm/examples/sgx/enclave.cc
@@ -0,0 +1,98 @@
+#include <cstring>
+#include <memory>
+#include <string>
+#include <tvm/runtime/module.h>
+#include <tvm/runtime/packed_func.h>
+#include <tvm/runtime/registry.h>
+#include "tvm/src/runtime/graph/graph_runtime.cc"
+#ifndef _LIBCPP_SGX_CONFIG
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include "tvm/src/runtime/file_util.cc"
+#endif
+
+// the statically linked graph json and params
+extern char _binary_lib_deploy_params_bin_start[];
+extern char _binary_lib_deploy_params_bin_end[];
+extern char _binary_lib_deploy_graph_json_start[];
+extern char _binary_lib_deploy_graph_json_end[];
+
+int RunInference(const char* img) {
+  tvm::runtime::Module graph_lib =
+      (*tvm::runtime::Registry::Get("module._GetSystemLib"))();
+
+  size_t graph_json_size = ((size_t)_binary_lib_deploy_graph_json_end -
+                            (size_t)_binary_lib_deploy_graph_json_start);
+  size_t graph_params_size = ((size_t)_binary_lib_deploy_params_bin_end -
+                              (size_t)_binary_lib_deploy_params_bin_start);
+  std::string graph_json(_binary_lib_deploy_graph_json_start, graph_json_size);
+  std::string graph_params(_binary_lib_deploy_params_bin_start, graph_params_size);
+
+  int device_type = kDLCPU;
+  int device_id = 0;
+
+  TVMContext ctx;
+  ctx.device_type = static_cast<DLDeviceType>(device_type);
+  ctx.device_id = device_id;
+  std::shared_ptr<tvm::runtime::GraphRuntime> graph_rt =
+      std::make_shared<tvm::runtime::GraphRuntime>();
+
+  graph_rt->Init(graph_json, graph_lib, ctx);
+  graph_rt->LoadParams(graph_params);
+
+  DLTensor* input;
+  DLTensor* output;
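+  // Shapes assumed by this example: the converted ResNet18 takes a single
+  // 1x3x224x224 float32 input named "data" and produces 1000 class scores.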
+  int dtype_code = kDLFloat;
+  int dtype_bits = 32;
+  int dtype_lanes = 1;
+
+  int batch_size = 1;
+  int64_t input_shape[4] = {batch_size, 3, 224, 224};
+  int64_t output_shape[1] = {1000 /* num_classes */};
+  TVMArrayAlloc(input_shape, 4 /* ndim */, dtype_code, dtype_bits, dtype_lanes,
+                device_type, device_id, &input);
+  TVMArrayAlloc(output_shape, 1, dtype_code, dtype_bits, dtype_lanes,
+                device_type, device_id, &output);
+  memcpy(input->data, img, sizeof(float)*batch_size*3*224*224);
+
+  graph_rt->SetInput(graph_rt->GetInputIndex("data"), input);
+  graph_rt->Run();
+  graph_rt->GetOutput(0, output);
+
+  float max_prob = 0;
+  unsigned max_class = -1;
+  for (int i = 0; i < 1000; ++i) {
+    float p = static_cast<float*>(output->data)[i];
+    if (p > max_prob) {
+      max_prob = p;
+      max_class = i;
+    }
+  }
+
+  return max_class;
+}
+
+
+extern "C" {
+unsigned ecall_infer(const char* img) {
+  return RunInference(img);
+}
+}
+
+#ifndef _LIBCPP_SGX_CONFIG
+int main(void) {
+  std::ifstream f_img("bin/cat.bin", std::ios::binary);
+  std::string img(static_cast<std::stringstream&>(
+      std::stringstream() << f_img.rdbuf()).str());
+  unsigned predicted_class = RunInference(img.c_str());
+  if (predicted_class == 281) {
+    std::cout << "It's a tabby!" << std::endl;
+    return 0;
+  }
+  std::cerr << "Inference failed! Predicted class: " <<
+      predicted_class << std::endl;
+  return -1;
+}
+#endif
diff --git a/nnvm/examples/sgx/enclave_config.xml b/nnvm/examples/sgx/enclave_config.xml
new file mode 100644
index 0000000000000..923d58ec8bf3d
--- /dev/null
+++ b/nnvm/examples/sgx/enclave_config.xml
@@ -0,0 +1,11 @@
+<EnclaveConfiguration>
+  <ProdID>0</ProdID>
+  <ISVSVN>0</ISVSVN>
+  <StackMaxSize>0x2000</StackMaxSize>
+  <HeapMaxSize>0x6794000</HeapMaxSize>
+  <TCSNum>1</TCSNum>
+  <TCSPolicy>1</TCSPolicy>
+  <DisableDebug>0</DisableDebug>
+  <MiscSelect>0</MiscSelect>
+  <MiscMask>0xFFFFFFFF</MiscMask>
+</EnclaveConfiguration>
diff --git a/nnvm/examples/sgx/enclave_private.pem b/nnvm/examples/sgx/enclave_private.pem
new file mode 100644
index 0000000000000..529d07be3574f
--- /dev/null
+++ b/nnvm/examples/sgx/enclave_private.pem
@@ -0,0 +1,39 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIG4gIBAAKCAYEAroOogvsj/fZDZY8XFdkl6dJmky0lRvnWMmpeH41Bla6U1qLZ
+AmZuyIF+mQC/cgojIsrBMzBxb1kKqzATF4+XwPwgKz7fmiddmHyYz2WDJfAjIveJ
+ZjdMjM4+EytGlkkJ52T8V8ds0/L2qKexJ+NBLxkeQLfV8n1mIk7zX7jguwbCG1Pr
+nEMdJ3Sew20vnje+RsngAzdPChoJpVsWi/K7cettX/tbnre1DL02GXc5qJoQYk7b
+3zkmhz31TgFrd9VVtmUGyFXAysuSAb3EN+5VnHGr0xKkeg8utErea2FNtNIgua8H
+ONfm9Eiyaav1SVKzPHlyqLtcdxH3I8Wg7yqMsaprZ1n5A1v/levxnL8+It02KseD
+5HqV4rf/cImSlCt3lpRg8U5E1pyFQ2IVEC/XTDMiI3c+AR+w2jSRB3Bwn9zJtFlW
+KHG3m1xGI4ck+Lci1JvWWLXQagQSPtZTsubxTQNx1gsgZhgv1JHVZMdbVlAbbRMC
+1nSuJNl7KPAS/VfzAgEDAoIBgHRXxaynbVP5gkO0ug6Qw/E27wzIw4SmjsxG6Wpe
+K7kfDeRskKxESdsA/xCrKkwGwhcx1iIgS5+Qscd1Yg+1D9X9asd/P7waPmWoZd+Z
+AhlKwhdPsO7PiF3e1AzHhGQwsUTt/Y/aSI1MpHBvy2/s1h9mFCslOUxTmWw0oj/Q
+ldIEgWeNR72CE2+jFIJIyml6ftnb6qzPiga8Bm48ubKh0kvySOqnkmnPzgh+JBD6
+JnBmtZbfPT97bwTT+N6rnPqOOApvfHPf15kWI8yDbprG1l4OCUaIUH1AszxLd826
+5IPM+8gINLRDP1MA6azECPjTyHXhtnSIBZCyWSVkc05vYmNXYUNiXWMajcxW9M02
+wKzFELO8NCEAkaTPxwo4SCyIjUxiK1LbQ9h8PSy4c1+gGP4LAMR8xqP4QKg6zdu9
+osUGG/xRe/uufgTBFkcjqBHtK5L5VI0jeNIUAgW/6iNbYXjBMJ0GfauLs+g1VsOm
+WfdgXzsb9DYdMa0OXXHypmV4GwKBwQDUwQj8RKJ6c8cT4vcWCoJvJF00+RFL+P3i
+Gx2DLERxRrDa8AVGfqaCjsR+3vLgG8V/py+z+dxZYSqeB80Qeo6PDITcRKoeAYh9
+xlT3LJOS+k1cJcEmlbbO2IjLkTmzSwa80fWexKu8/Xv6vv15gpqYl1ngYoqJM3pd
+vzmTIOi7MKSZ0WmEQavrZj8zK4endE3v0eAEeQ55j1GImbypSf7Idh7wOXtjZ7WD
+Dg6yWDrri+AP/L3gClMj8wsAxMV4ZR8CgcEA0fzDHkFa6raVOxWnObmRoDhAtE0a
+cjUj976NM5yyfdf2MrKy4/RhdTiPZ6b08/lBC/+xRfV3xKVGzacm6QjqjZrUpgHC
+0LKiZaMtccCJjLtPwQd0jGQEnKfMFaPsnhOc5y8qVkCzVOSthY5qhz0XNotHHFmJ
+gffVgB0iqrMTvSL7IA2yqqpOqNRlhaYhNl8TiFP3gIeMtVa9rZy31JPgT2uJ+kfo
+gV7sdTPEjPWZd7OshGxWpT6QfVDj/T9T7L6tAoHBAI3WBf2DFvxNL2KXT2QHAZ9t
+k3imC4f7U+wSE6zILaDZyzygA4RUbwG0gv8/TJVn2P/Eynf76DuWHGlaiLWnCbSz
+Az2DHBQBBaku409zDQym3j1ugMRjzzSQWzJg0SIyBH3hTmnYcn3+Uqcp/lEBvGW6
+O+rsXFt3pukqJmIV8HzLGGaLm62BHUeZf3dyWm+i3p/hQAL7Xvu04QW70xuGqdr5
+afV7p5eaeQIJXyGQJ0eylV/90+qxjMKiB1XYg6WYvwKBwQCL/ddpgOdHJGN8uRom
+e7Zq0Csi3hGheMKlKbN3vcxT5U7MdyHtTZZOJbTvxKNNUNYH/8uD+PqDGNneb29G
+BfGzvI3EASyLIcGZF3OhKwZd0jUrWk2y7Vhob91jwp2+t73vdMbkKyI4mHOuXvGv
+fg95si9oO7EBT+Oqvhccd2J+F1IVXncccYnF4u5ZGWt5lLewN/pVr7MjjykeaHqN
+t+rfnQam2psA6fL4zS2zTmZPzR2tnY8Y1GBTi0Ko1OKd1HMCgcAb5cB/7/AQlhP9
+yQa04PLH9ygQkKKptZp7dy5WcWRx0K/hAHRoi2aw1wZqfm7VBNu2SLcs90kCCCxp
+6C5sfJi6b8NpNbIPC+sc9wsFr7pGo9SFzQ78UlcWYK2Gu2FxlMjonhka5hvo4zvg
+WxlpXKEkaFt3gLd92m/dMqBrHfafH7VwOJY2zT3WIpjwuk0ZzmRg5p0pG/svVQEH
+NZmwRwlopysbR69B/n1nefJ84UO50fLh5s5Zr3gBRwbWNZyzhXk=
+-----END RSA PRIVATE KEY-----
diff --git a/nnvm/examples/sgx/model.edl b/nnvm/examples/sgx/model.edl
new file mode 100644
index 0000000000000..fd45cc318dbf8
--- /dev/null
+++ b/nnvm/examples/sgx/model.edl
@@ -0,0 +1,7 @@
+enclave {
+    from "sgx_tstdc.edl" import sgx_thread_wait_untrusted_event_ocall, sgx_thread_set_untrusted_event_ocall, sgx_thread_setwait_untrusted_events_ocall, sgx_thread_set_multiple_untrusted_events_ocall;
+
+    trusted {
+        public unsigned ecall_infer([user_check] const char* img);
+    };
+};
diff --git a/nnvm/examples/sgx/run_example.sh b/nnvm/examples/sgx/run_example.sh
new file mode 100755
index 0000000000000..4fd0e3f36a639
--- /dev/null
+++ b/nnvm/examples/sgx/run_example.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+sgx_sdk=${SGX_SDK:=/opt/sgxsdk}
+
+make
+echo "========================="
+LD_LIBRARY_PATH="$sgx_sdk/lib64":${LD_LIBRARY_PATH} bin/run_model