diff --git a/.gitignore b/.gitignore index 411a36c5c7..9db27249a5 100644 --- a/.gitignore +++ b/.gitignore @@ -194,3 +194,4 @@ testing/scripts/go .tox operator/operator.tar +operator/helm/tmp/ diff --git a/components/routers/epsilon-greedy/Makefile b/components/routers/epsilon-greedy/Makefile index d743882947..3d2253db6e 100644 --- a/components/routers/epsilon-greedy/Makefile +++ b/components/routers/epsilon-greedy/Makefile @@ -1,8 +1,14 @@ IMAGE_VERSION=1.3 IMAGE_NAME=seldonio/mab_epsilon_greedy -build: - s2i build . seldonio/seldon-core-s2i-python3:0.4 $(IMAGE_NAME):$(IMAGE_VERSION) +build_rest: + s2i build . -E environment_rest seldonio/seldon-core-s2i-python3:0.15 $(IMAGE_NAME)_rest:$(IMAGE_VERSION) -push_to_dockerhub: - docker push $(IMAGE_NAME):$(IMAGE_VERSION) +push_to_dockerhub_rest: + docker push $(IMAGE_NAME)_rest:$(IMAGE_VERSION) + +build_grpc: + s2i build . -E environment_grpc seldonio/seldon-core-s2i-python3:0.15 $(IMAGE_NAME)_grpc:$(IMAGE_VERSION) + +push_to_dockerhub_grpc: + docker push $(IMAGE_NAME)_grpc:$(IMAGE_VERSION) diff --git a/components/routers/epsilon-greedy/environment_grpc b/components/routers/epsilon-greedy/environment_grpc new file mode 100644 index 0000000000..1a7970f916 --- /dev/null +++ b/components/routers/epsilon-greedy/environment_grpc @@ -0,0 +1,4 @@ +MODEL_NAME=EpsilonGreedy +API_TYPE=GRPC +SERVICE_TYPE=ROUTER +PERSISTENCE=0 diff --git a/components/routers/epsilon-greedy/.s2i/environment b/components/routers/epsilon-greedy/environment_rest similarity index 100% rename from components/routers/epsilon-greedy/.s2i/environment rename to components/routers/epsilon-greedy/environment_rest diff --git a/doc/source/analytics/analytics.md b/doc/source/analytics/analytics.md index 3f4ee09b37..2e7cee43e3 100644 --- a/doc/source/analytics/analytics.md +++ b/doc/source/analytics/analytics.md @@ -1,22 +1,17 @@ -# Seldon Core Analytics +# Metrics -Seldon Core exposes metrics that can be scraped by Prometheus. 
The core metrics are exposed by the service orchestrator (```engine```) and API gateway (```server_ingress```). +Seldon Core exposes metrics that can be scraped by Prometheus. The core metrics are exposed by the service orchestrator (```executor```). The metrics are: **Prediction Requests** - * ```seldon_api_engine_server_requests_duration_seconds_(bucket,count,sum) ``` : Requests to the service orchestrator from an ingress, e.g. API gateway or Ambassador - * ```seldon_api_engine_client_requests_duration_seconds_(bucket,count,sum) ``` : Requests from the service orchestrator to a component, e.g., a model - * ```seldon_api_server_ingress_requests_duration_seconds_(bucket,count,sum) ``` : Requests to the API Gateway from an external client - -**Feedback Requests** - - * ```seldon_api_model_feedback_reward_total``` : Reward sent via Feedback API - * ```seldon_api_model_feedback_total``` : Total feedback requests + * ```seldon_api_executor_server_requests_duration_seconds_(bucket,count,sum) ``` : Requests to the service orchestrator from an ingress, e.g. API gateway or Ambassador + * ```seldon_api_executor_client_requests_duration_seconds_(bucket,count,sum) ``` : Requests from the service orchestrator to a component, e.g., a model Each metric has the following key value pairs for further filtering which will be taken from the SeldonDeployment custom resource that is running: + * service * deployment_name * predictor_name * predictor_version @@ -31,10 +26,11 @@ Each metric has the following key value pairs for further filtering which will b Seldon Core provides an example Helm analytics chart that displays the above Prometheus metrics in Grafana. 
You can install it with: ```bash -helm install seldon-core-analytics --name seldon-core-analytics \ - --repo https://storage.googleapis.com/seldon-charts \ - --set grafana_prom_admin_password=password \ - --set persistence.enabled=false \ +helm install seldon-core-analytics seldon-core-analytics \ + --repo https://storage.googleapis.com/seldon-charts \ + --set grafana_prom_admin_password=password \ + --set persistence.enabled=false \ + --namespace seldon-system ``` The available parameters are: @@ -45,10 +41,14 @@ The available parameters are: Once running you can expose the Grafana dashboard with: ```bash -kubectl port-forward $(kubectl get pods -l app=grafana-prom-server -o jsonpath='{.items[0].metadata.name}') 3000:3000 +kubectl port-forward $(kubectl get pods -n seldon-system -l app=grafana-prom-server -o jsonpath='{.items[0].metadata.name}') 3000:3000 -n seldon-system ``` You can then view the dashboard at http://localhost:3000/dashboard/db/prediction-analytics?refresh=5s&orgId=1 ![dashboard](./dashboard.png) +## Example + +There is [an example notebook you can use to test the metrics](../examples/metrics.html). + diff --git a/doc/source/analytics/logging.md b/doc/source/analytics/logging.md index 48a21e3a97..7eaba059f1 100644 --- a/doc/source/analytics/logging.md +++ b/doc/source/analytics/logging.md @@ -1,116 +1,50 @@ -# Centralised Logging with the Elastic Stack +# Payload Logging -## Introduction - -Here we will set up EFK (elasticsearch, fluentd/fluentbit, kibana) as a stack to gather logs from SeldonDeployments and make them searchable. - -This demo is aimed at minikube. - -Alternatives are available and if you are running in cloud then you can consider a managed service from your cloud provider. - -If you just want to bootstrap a full logging and request tracking setup for minikube, run ./full-setup.sh. 
That includes the [request logging setup](./request-logging/README.md) - -## Setup - -If helm is not already set up then it needs to be configured: - -``` -kubectl -n kube-system create sa tiller -kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller -helm init --service-account tiller -``` - -Install elasticsearch with minikube configuration: - -``` -helm install --name elasticsearch elasticsearch --version 7.1.1 --namespace=logs -f elastic-minikube.yaml --repo https://helm.elastic.co -``` - -Then fluentd as a collection agent (chosen in preference to fluentbit - see notes at end): - -``` -helm install fluentd-elasticsearch --name fluentd --namespace=logs -f fluentd-values.yaml --repo https://kiwigrid.github.io -``` - -And kibana UI: - -``` -helm install kibana --version 7.1.1 --name=kibana --namespace=logs --set service.type=NodePort --repo https://helm.elastic.co -``` - -## Generating Logging - -First we need seldon and a seldon deployment. - -Install seldon operator: - -``` -helm install --name seldon-core ../../helm-charts/seldon-core-operator/ --namespace seldon-system -``` - -Check that it now recognises the seldon CRD by running `kubectl get sdep`. - -Now a model: - -``` -helm install --name seldon-single-model ../../helm-charts/seldon-single-model/ --set engine.env.LOG_MESSAGES_EXTERNALLY="false" -``` - -And the loadtester: +Logging of request and response payloads from your Seldon Deployment can be accomplished by adding a logging section to any part of the Seldon deployment graph. 
An example is shown below: ``` -kubectl label nodes $(kubectl get nodes -o jsonpath='{.items[0].metadata.name}') role=locust --overwrite -helm install --name seldon-core-loadtesting ../../helm-charts/seldon-core-loadtesting/ --set locust.host=http://seldon-single-model-seldon-single-model:8000 --set oauth.enabled=false --set oauth.key=oauth-key --set oauth.secret=oauth-secret --set locust.hatchRate=1 --set locust.clients=1 --set loadtest.sendFeedback=0 --set locust.minWait=0 --set locust.maxWait=0 --set replicaCount=1 -``` - -## Inspecting Logging and Search for Requests - -To find kibana URL +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + name: seldon-model +spec: + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier:1.3 + name: classifier + graph: + children: [] + endpoint: + type: REST + name: classifier + type: MODEL + logger: + url: http://mylogging-endpoint + mode: all + name: example + replicas: 1 ``` -echo $(minikube ip)":"$(kubectl get svc kibana-kibana -n logs -o=jsonpath='{.spec.ports[?(@.port==5601)].nodePort}') -``` - -When Kibana appears for the first time there will be a brief animation while it initializes. -On the Welcome page click Explore on my own. -From the top-left or from the `Visualize and Explore Data` panel select the `Discover` item. -In the form field Index pattern enter logstash-* -It should read "Success!" and Click the `> Next` step button on the right. -In the next form select timestamp from the dropdown labeled `Time Filter` field name. -From the bottom-right of the form select `Create index pattern`. -In a moment a list of fields will appear. -From the top-left or the home screen's `Visualize and Explore Data` panel, select the `Discover` item. -The log list will appear. -Refine the list a bit by selecting `log` near the bottom the left-hand Selected fields list. 
-When you hover over or click on the word `log`, click the `Add` button to the right of the label. -You can create a filter using the `Add Filter` button under `Search`. The field can be `kubernetes.labels.seldon-app` and the value can be an 'is' match on `seldon-single-model-seldon-single-model`. - -The custom fields in the request bodies may not currently be in the index. If you hover over one in a request you may see `No cached mapping for this field`. - -To add mappings, go to `Management` at the bottom-left and then `Index Patterns`. Hit `Refresh` on the index created earlier. The number of fields should increase and `request.data.names` should be present. - -Now we can go back and add a further filter for `data.names` with the operator `exists`. We can add further filters if we want, such as the presence of a feature name or the presence of a feature value. - -![picture](https://raw.githubusercontent.com/SeldonIO/seldon-core/master/examples/centralised-logging/kibana-custom-search.png) - -## Notes - -The fluentd setup is configured to ensure only labelled pods are logged and seldon pods are automatically labelled. -Fluentbit can be chosen instead. This could be installed with: +The logging for the top level requets response is provided by: ``` -helm install stable/fluent-bit --name=fluent-bit --namespace=logs --set backend.type=es --set backend.es.host=elasticsearch-master + logger: + url: http://mylogging-endpoint + mode: all ``` -In that case pods would be logged. At the time of writing fluentbit only supports [excluding pods by label, not including](https://github.com/fluent/fluent-bit/issues/737). +In this example both request and response payloads as specified by the `mode` attribute are sent as CloudEvents to the url `http://mylogging-endpoint`. -Seldon can also be used to log full HTTP requests. 
See [request logging guide](./request-logging/README.md) +The specification is: -The elasticsearch backend is not available externally by default but can be exposed if needed for debugging with `kubectl patch svc elasticsearch-master -n logs -p '{"spec": {"type": "LoadBalancer"}}'` + * url: Any url. Optional. If not provided then it will default to the default knative borker in the namespace of the Seldon Deployment. + * mode: Either `request`, `response` or `all` -## Credits +## Example Notebook -Loosely based on https://www.katacoda.com/javajon/courses/kubernetes-observability/efk -Fluentd filtering based on https://blog.ptrk.io/tweaking-an-efk-stack-on-kubernetes/ +You can try out an [example notebook with logging](../examples/payload_logging.html) diff --git a/doc/source/examples/metrics.nblink b/doc/source/examples/metrics.nblink new file mode 100644 index 0000000000..bc809ea80b --- /dev/null +++ b/doc/source/examples/metrics.nblink @@ -0,0 +1,3 @@ +{ + "path": "../../../examples/models/metrics/metrics.ipynb" +} diff --git a/doc/source/examples/notebooks.rst b/doc/source/examples/notebooks.rst index 3721b4f77d..7528be2083 100644 --- a/doc/source/examples/notebooks.rst +++ b/doc/source/examples/notebooks.rst @@ -29,6 +29,7 @@ Notebooks Chainer MNIST Kubeflow Seldon E2E Pipeline Max gRPC Message Size + Metrics Model with Custom Metrics MLflow MLflow Pre-packaged MOdel Server A/B Test @@ -37,7 +38,9 @@ Notebooks NVIDIA TensorRT MNIST OpenVINO ImageNet OpenVINO ImageNet Ensemble - ONNX ResNet with Intel nGraph + ONNX ResNet with Intel nGraph + Payload Logging + Protocol Examples R Iris Classifier R MNIST Classifier REST timeouts @@ -48,6 +51,7 @@ Notebooks Sklearn, Xgboost, Tensorflow Server Examples Tensorflow MNIST TFserving MNIST + Tracing Spam Classification TRANSFORMER component Spam Classification TRANSFORMER + COMBINER Component diff --git a/doc/source/examples/payload_logging.nblink b/doc/source/examples/payload_logging.nblink new file mode 100644 index 
0000000000..54980545d4 --- /dev/null +++ b/doc/source/examples/payload_logging.nblink @@ -0,0 +1,3 @@ +{ + "path": "../../../examples/models/payload_logging/payload_logging.ipynb" +} diff --git a/doc/source/examples/protocol_examples.nblink b/doc/source/examples/protocol_examples.nblink new file mode 100644 index 0000000000..e1ff1f047b --- /dev/null +++ b/doc/source/examples/protocol_examples.nblink @@ -0,0 +1,3 @@ +{ + "path": "../../../notebooks/protocol_examples.ipynb" +} diff --git a/doc/source/examples/seldon-grpc-dashboard.png b/doc/source/examples/seldon-grpc-dashboard.png new file mode 100644 index 0000000000..631e9d75fa Binary files /dev/null and b/doc/source/examples/seldon-grpc-dashboard.png differ diff --git a/doc/source/examples/seldon-rest-dashboard.png b/doc/source/examples/seldon-rest-dashboard.png new file mode 100644 index 0000000000..b3290477db Binary files /dev/null and b/doc/source/examples/seldon-rest-dashboard.png differ diff --git a/doc/source/examples/tfserving-grpc-dashboard.png b/doc/source/examples/tfserving-grpc-dashboard.png new file mode 100644 index 0000000000..f115ffad28 Binary files /dev/null and b/doc/source/examples/tfserving-grpc-dashboard.png differ diff --git a/doc/source/examples/tfserving-rest-dashboard.png b/doc/source/examples/tfserving-rest-dashboard.png new file mode 100644 index 0000000000..df44c3b542 Binary files /dev/null and b/doc/source/examples/tfserving-rest-dashboard.png differ diff --git a/doc/source/examples/tmpl_model_tracing.nblink b/doc/source/examples/tmpl_model_tracing.nblink deleted file mode 100644 index 8e353c968b..0000000000 --- a/doc/source/examples/tmpl_model_tracing.nblink +++ /dev/null @@ -1,3 +0,0 @@ -{ - "path": "../../../examples/models/template_model_tracing/tracing.ipynb" -} diff --git a/doc/source/examples/tracing.nblink b/doc/source/examples/tracing.nblink new file mode 100644 index 0000000000..13e10d5045 --- /dev/null +++ b/doc/source/examples/tracing.nblink @@ -0,0 +1,3 @@ +{ + "path": 
"../../../examples/models/tracing/tracing.ipynb" +} diff --git a/doc/source/graph/annotations.md b/doc/source/graph/annotations.md index e53e110130..cc3ea43035 100644 --- a/doc/source/graph/annotations.md +++ b/doc/source/graph/annotations.md @@ -32,36 +32,6 @@ You can configure aspects of Seldon Core via annotations in the SeldonDeployment * Locations : SeldonDeployment.spec.annotations * [gRPC headless example](grpc_load_balancing_ambassador.md) -Otherwise any annotations starting with `seldon.io/engine-` will be interpreted as specifying environment variables for the engine container. These include: - - * ```seldon.io/engine-java-opts``` : Java Opts for Service Orchestrator - * Locations : SeldonDeployment.spec.annotations - * [Java Opts example](model_engine_java_opts.md) - * Translates to the environment variable JAVA_OPTS - * ```seldon.io/engine-seldon-log-requests``` : Whether to log raw requests from engine - * Locations : SeldonDeployment.spec.annotations - * Translates to the environment variable SELDON_LOG_REQUESTS - * ```seldon.io/engine-seldon-log-responses``` : Whether to log raw responses from engine - * Locations : SeldonDeployment.spec.annotations - * Translates to the environment variable SELDON_LOG_RESPONSES - * ```seldon.io/engine-seldon-log-messages-externally``` : Option to turn on logging of requests via a logging service - * Locations : SeldonDeployment.spec.annotations - * Translates to the environment variable SELDON_LOG_MESSAGES_EXTERNALLY - * ```seldon.io/engine-seldon-log-message-type``` : Option to override type set on messages when sending to logging service. 
Used to determine which logger impl - * Locations : SeldonDeployment.spec.annotations - * Translates to the environment variable SELDON_LOG_MESSAGE_TYPE - * ```seldon.io/engine-seldon-message-logging-service``` : Option to override url to broker that sends to logging service - * Locations : SeldonDeployment.spec.annotations - * Translates to the environment variable SELDON_MESSAGE_LOGGING_SERVICE - -More details on logging-related variables can be seen in the [request-logging example](https://github.com/SeldonIO/seldon-core/tree/master/examples/centralised-logging/README.md). - -Environment variables for the engine can also be set in the `svcOrchSpec` section of the SeldonDeployment, alongside engine resources. For examples see the helm charts or the [distributed tracing example](./distributed-tracing.md). - -If both annotations and `svcOrchSpec` environment variables are used to set an environment variable for the engine container then `svcOrchSpec` environment variables take priority. - -The above are the key engine env vars. For a full listing of engine env vars see the application.properties file of the engine source code. - ### Misc diff --git a/doc/source/graph/distributed-tracing.md b/doc/source/graph/distributed-tracing.md index c603e0d642..2fa7c810a7 100644 --- a/doc/source/graph/distributed-tracing.md +++ b/doc/source/graph/distributed-tracing.md @@ -1,14 +1,10 @@ # Distributed Tracing -You can use Jaeger Open Tracing to trace your API calls to Seldon Core. - -This feature is available from versions >=0.2.5-SNAPSHOT of the core images and presently in: - - * Python wrappers >=0.5-SNAPSHOT +You can use Open Tracing to trace your API calls to Seldon Core. ## Install Jaeger -You will need to install Jaeger on your Kubernetes cluster. Follow their [documentation](https://github.com/jaegertracing/jaeger-kubernetes). +You will need to install Jaeger on your Kubernetes cluster. 
Follow their [documentation](https://www.jaegertracing.io/docs/1.16/operator/) ## Configuration @@ -143,4 +139,4 @@ An example is show below: ## Worked Example -[A fully worked template example](../examples/tmpl_model_tracing.html) is provided. +[A fully worked template example](../examples/tracing.html) is provided. diff --git a/doc/source/graph/jaeger-ui-grpc-example.png b/doc/source/graph/jaeger-ui-grpc-example.png index 7bc6c0f3e5..f616b92cf5 100644 Binary files a/doc/source/graph/jaeger-ui-grpc-example.png and b/doc/source/graph/jaeger-ui-grpc-example.png differ diff --git a/doc/source/graph/jaeger-ui-rest-example.png b/doc/source/graph/jaeger-ui-rest-example.png index 7b4b2823fe..775074a85a 100644 Binary files a/doc/source/graph/jaeger-ui-rest-example.png and b/doc/source/graph/jaeger-ui-rest-example.png differ diff --git a/doc/source/graph/protocols.md b/doc/source/graph/protocols.md new file mode 100644 index 0000000000..97890cf392 --- /dev/null +++ b/doc/source/graph/protocols.md @@ -0,0 +1,37 @@ +# Protocols + +Tensorflow protocol is only available in version >=1.1. + +Seldon Core supports the following data planes: + + * REST and gRPC Seldon protocol + * REST and gRPC Tensorflow Serving Protocol + +## REST and gRPC Seldon Protocol + + * [REST Seldon Protocol](../reference/apis/index.html) + +Seldon is the default protocol for SeldonDeployment resources. You can specify the gRPC protocol by setting `transport: grpc` in your SeldonDeployment resource or ensuring all components in the graph have endpoint.transport set to grpc. + +See [example notebook](../examples/protocol_examples.html). + +## REST and gRPC Tensorflow Protocol + + * [REST Tensorflow Protocol definition](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/api_rest.md). + * [gRPC Tensorflow Protocol definition](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/apis/prediction_service.proto). 
+ +Activate this protocol by specifying `protocol: tensorflow` and `transport: rest` or `transport: grpc` in your Seldon Deployment. See [example notebook](../examples/protocol_examples.html). + +For Seldon graphs the protocol will work as expected for single model graphs for Tensorflow Serving servers running as the single model in the graph. For more complex graphs you can chain models: + + * Sending the response from the first as a request to the second. This will be done automatically when you define a chain of models as a Seldon graph. It is up to the user to ensure the response of each chained model can be fed as a request to the next in the chain. + * Only Predict calls can be handled in multiple model chaining. + +General considerations: + + * Seldon components marked as MODELS, INPUT_TRANSFORMER and OUTPUT_TRANSFORMERS will allow a PredictionService Predict method to be called. + * GetModelStatus for any model in the graph is available. + * GetModelMetadata for any model in the graph is available. + * Combining and Routing with the Tensorflow protocol is not presently supported. + + diff --git a/doc/source/graph/svcorch.md b/doc/source/graph/svcorch.md index 618f6ccfc7..2e3f8de762 100644 --- a/doc/source/graph/svcorch.md +++ b/doc/source/graph/svcorch.md @@ -4,7 +4,27 @@ The service orchestrator is a component that is added to your inference graph to - Correctly manage the request/response paths described by your inference graph - Expose Prometheus metrics -- Add meta data to the response +- Provide Tracing via Open Tracing +- Add CloudEvent based payload logging + +The current service orchestrator is a GoLang implementation. There is a previous Java implementation which is now deprecated post 1.0 releases of Seldon Core. + +Post 1.0 of Seldon Core you can specify the protocol and transport for the data plane of your inference graph. 
At present we allow the following combinations: + + * Protocol: Seldon, Tensorflow + * Transport: REST, gRPC + +You can see basic examples for all options in the [protocol examples notebook](../examples/protocol_examples.html). + +## Using the deprecated Java Engine + +You can continue to use the deprecated Java engine Service Orchestrator. + + * For Helm installs `--set executor.enabled=false` + * For Kustomize - update [manager.yaml](https://github.com/SeldonIO/seldon-core/blob/master/operator/config/manager/manager.yaml) env with `USE_EXECUTOR: "false"` + + +For further details on the Java engine see previous versions of this page in the docs. ## Resource Requests/Limits for Service Orchestrator @@ -58,17 +78,6 @@ You can set custom resource request and limits for this component by specifying ``` -### Java Settings - -The service orchestrator is a Java component. You can directly control its java settings as describe [here](../graph/annotations.html#service-orchestrator) - -## Environment Variables for Service Orchestrator - -You can manipulate some of the functionality of the service orchestrator by adding specific environment variables to the `svcOrchSpec` section. - -- [Configure Jaeger Tracing Example](../graph/distributed-tracing.html) -- [Set logging level in service orchestrator engine](../analytics/log_level.html#setting-log-level-in-the-seldon-engine) - ## Bypass Service Orchestrator (version >= 0.5.0, alpha feature) If you are deploying a single model then for those wishing to minimize the latency and resource usage for their deployed model you can opt out of having the service orchestrator included. To do this add the annotation `seldon.io/no-engine: "true"` to the predictor. The predictor must contain just a single node graph. An example is shown below: @@ -104,73 +113,3 @@ In these cases the external API requests will be sent directly to your model. 
At Note no metrics or extra data will be added to the request so this would need to be done by your model itself if needed. -## Floating-point and integer numbers - -We use [protobuf](https://developers.google.com/protocol-buffers/) to describe -the format of the input and output messages. -You can see the [reference for the SeldonMessage -object](../reference/apis/prediction.md) for more information about the actual -format. - -As part of the options to specify the input request, you can use the `jsonData` -key to submit an arbitrary json. -To serialise the info in `jsonData`, we use the -[google.protobuf.Struct](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Struct) -and -[google.protobuf.Value](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#value) -types. -One of the caveats of these types is that they don't support integers. -Instead, they treat all numbers as floating-point numbers to align with the -JSON specification, where there is no distinction between both. -Therefore, when the service orchestrator parses the request, it will always -treat integers as floating-point numbers. -This behaviour can cause issues down the line if the nodes of the inference -graph expect integers. - -To illustrate this problem, we can think of a payload such as: - -```JSON -{ - "jsonData": { - "vocabulary_length": 257, - "threshold": 0.78, - "sentence": "This is our input text" - } -} -``` - -Because of how the `protobuf` types `google.protobuf.Struct` and -`google.protobuf.Value` work, the value of the `jsonData.vocabulary_length` -field will be parsed as a floating-point number `257.0` in the service -orchestrator. 
-By default, this would then get serialised and sent downstream as: - -```JSON -{ - "jsonData": { - "vocabulary_length": 257.0, - "threshold": 0.78, - "sentence": "This is our input text" - } -} -``` - -The nodes of the inference graph would then parse the above as a floating-point -number, which could cause issues on any part that requires an integer input. - -As a workaround, **the orchestrator omits empty decimal parts** when it -serialises the request before sending it to downstream nodes. -Going back to the example above, the orchestrator will serialise that input -payload as: - -```JSON -{ - "jsonData": { - "vocabulary_length": 257, - "threshold": 0.78, - "sentence": "This is our input text" - } -} -``` - -Note that, if the decimal part is not empty the orchestrator will respect it. diff --git a/doc/source/index.rst b/doc/source/index.rst index b0d8c18905..3a10e2a573 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -49,44 +49,34 @@ Seldon Core is an open source platform for deploying machine learning models on .. toctree:: :maxdepth: 1 - :caption: Wrappers + :caption: Language Wrappers Python Models Java Models R Models NodeJS Models Custom Metrics - Logging & Log Level - -.. toctree:: - :maxdepth: 1 - :caption: Inference Graphs - Distributed Tracing - Annotation-based Configuration - Private Docker Registry - Service Orchestrator - .. toctree:: :maxdepth: 1 :caption: Ingress Ambassador Ingress Istio Ingress - Seldon OAuth Gateway .. toctree:: :maxdepth: 1 - :caption: Deployment Options + :caption: Production - Helm Charts - Grafana Analytics - Elastic Stack Logging + Protocols + Tracing + Metrics + Payload Logging Autoscaling - + .. toctree:: :maxdepth: 1 - :caption: ML Compliance and Governance + :caption: Advanced Inference Model Explanations Outlier Detection @@ -98,12 +88,6 @@ Seldon Core is an open source platform for deploying machine learning models on Notebooks Integrations - - -.. 
toctree:: - :maxdepth: 1 - :caption: Tutorials - Articles/Blogs Videos @@ -111,17 +95,20 @@ Seldon Core is an open source platform for deploying machine learning models on :maxdepth: 1 :caption: Reference - General Availability - Python API reference - Seldon Microservice API - Seldon Orchestrator + Annotation-based Configuration AWS Marketplace Install - Benchmarking - Seldon Deployment CRD - Prediction APIs - Seldon Core Helm Chart - Release Highlights + Benchmarking + General Availability + Helm Charts Images + Logging & Log Level + Private Docker Registry + Prediction APIs + Python API reference + Release Highlights + Seldon Core Helm Chart + Seldon Deployment CRD + Service Orchestrator .. toctree:: :maxdepth: 1 diff --git a/doc/source/python/python_component.md b/doc/source/python/python_component.md index 453cf01a32..0687eb04d9 100644 --- a/doc/source/python/python_component.md +++ b/doc/source/python/python_component.md @@ -146,6 +146,9 @@ class ModelWithMetrics(object): return {"system":"production"} ``` +## REST Metadata Endpoint +The python wrapper will automatically expose a `/metadata` endpoint to return metadata about the loaded model. 
It is up to the developer to implement a `metadata` method in their class to provide an arbitrary Dict back containing the model metadata. + ## REST Health Endpoint If you wish to add a REST health point, you can implement the `health_status` method with signature as shown below: ```python diff --git a/doc/source/reference/apis/external-prediction.md b/doc/source/reference/apis/external-prediction.md index d4aded2a9c..30fe13b4c0 100644 --- a/doc/source/reference/apis/external-prediction.md +++ b/doc/source/reference/apis/external-prediction.md @@ -8,7 +8,7 @@ The Seldon Core exposes a generic external API to connect your ML runtime predic ### Prediction - - endpoint : POST /api/v0.1/predictions + - endpoint : POST /api/v1.0/predictions - payload : JSON representation of ```SeldonMessage``` - see [proto definition](./prediction.md#proto-buffer-and-grpc-definition) - example payload : @@ -18,7 +18,7 @@ The Seldon Core exposes a generic external API to connect your ML runtime predic ### Feedback - - endpoint : POST /api/v0.1/feedback + - endpoint : POST /api/v1.0/feedback - payload : JSON representation of ```Feedback``` - see [proto definition](./prediction.md#proto-buffer-and-grpc-definition) ## gRPC diff --git a/doc/source/reference/apis/internal-api.md b/doc/source/reference/apis/internal-api.md index b9313d617b..6acaa3431b 100644 --- a/doc/source/reference/apis/internal-api.md +++ b/doc/source/reference/apis/internal-api.md @@ -18,8 +18,6 @@ A service to return predictions. ### REST API -#### Predict - | | | | - |- | | Endpoint | POST /predict | @@ -32,6 +30,9 @@ Example request payload: {"data":{"names":["a","b"],"tensor":{"shape":[2,2],"values":[0,0,1,1]}}} ``` +Example response payload: + + ### gRPC ```protobuf @@ -40,13 +41,12 @@ service Model { } ``` -## Router +## Route A service to route requests to one of its children and receive feedback rewards for them. 
### REST API -#### Route | | | | - |- | @@ -60,7 +60,22 @@ Example request payload: {"data":{"names":["a","b"],"tensor":{"shape":[2,2],"values":[0,0,1,1]}}} ``` -#### Send Feedback +Example response payload: + +```json +{"data":{"ndarray":[1]}} +``` + +### gRPC + +```protobuf +service Router { + rpc Route(SeldonMessage) returns (SeldonMessage) {}; + } +``` + + +## Send Feedback | | | | - |- | @@ -99,19 +114,16 @@ Example request payload: ```protobuf service Router { - rpc Route(SeldonMessage) returns (SeldonMessage) {}; rpc SendFeedback(Feedback) returns (SeldonMessage) {}; } ``` - ## Combiner A service to combine responses from its children into a single response. ### REST API -#### Combine | | | | - |- | @@ -135,7 +147,6 @@ A service to transform its input. ### REST API -#### Transform | | | | - |- | @@ -164,8 +175,6 @@ A service to transform the response from its child. ### REST API -#### Transform - | | | | - |- | | Endpoint | POST /transform-output | diff --git a/doc/source/workflow/install.md b/doc/source/workflow/install.md index ee97177e13..e04063b183 100644 --- a/doc/source/workflow/install.md +++ b/doc/source/workflow/install.md @@ -10,6 +10,18 @@ We presently support [Helm](#seldon-core-helm-install) and [Kustomize](#seldon-c >Please see [Migrating from Helm v2 to Helm v3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) if you are already running Seldon Core using Helm v2 and wish to upgrade. +## New Service Orchestrator + +From version 1.1 Seldon Core comes with a new service orchestrator written in Go which replaces the previous Java engine. Some breaking changes are present: + + * Metadata fields in the Seldon Protocol are no longer added. Any custom metata data will need to be added and exposed to Prometheus metrics by the individual components in the graph + * All components in the graph must either be REST or gRPC and only the given protocol is exposed externally. 
+ +The new service orchestrator comes with several advantages including ability to handle Tensorflow REST and gRPC protocols and full metrics and tracing support for both REST and gRPC. + +For those wishing to use the deprecated Java engine service orchestrator see [the service orchestrator docs](../graph/svcorch.md) for details. + + ## Seldon Core Helm Install First [install Helm](https://docs.helm.sh). When helm is installed you can deploy the seldon controller to manage your Seldon Deployment graphs. diff --git a/examples/models/mean_classifier/Makefile b/examples/models/mean_classifier/Makefile index 48f56c14f1..bb6540acfc 100644 --- a/examples/models/mean_classifier/Makefile +++ b/examples/models/mean_classifier/Makefile @@ -2,13 +2,19 @@ VERSION=1.3 IMAGE_BASE=seldonio/mock_classifier build_rest: - s2i build -E environment_rest . seldonio/seldon-core-s2i-python36:0.13-SNAPSHOT ${IMAGE_BASE}_rest:${VERSION} + s2i build -E environment_rest . seldonio/seldon-core-s2i-python36:0.16-SNAPSHOT ${IMAGE_BASE}_rest:${VERSION} push_rest: docker push ${IMAGE_BASE}_rest:${VERSION} build_grpc: - s2i build -E environment_grpc . seldonio/seldon-core-s2i-python36:0.13-SNAPSHOT ${IMAGE_BASE}_grpc:${VERSION} + s2i build -E environment_grpc . 
seldonio/seldon-core-s2i-python36:0.16-SNAPSHOT ${IMAGE_BASE}_grpc:${VERSION} push_grpc: docker push ${IMAGE_BASE}_grpc:${VERSION} + +run_rest_local: + export PREDICTIVE_UNIT_SERVICE_PORT=9000 && TRACING=1 JAEGER_AGENT_HOST=localhost JAEGER_AGENT_PORT=6831 JAEGER_SAMPLER_TYPE=const JAEGER_SAMPLER_PARAM=1 seldon-core-microservice --service-type MODEL MeanClassifier REST + +run_grpc_local: + export PREDICTIVE_UNIT_SERVICE_PORT=9000 && TRACING=1 JAEGER_AGENT_HOST=localhost JAEGER_AGENT_PORT=6831 JAEGER_SAMPLER_TYPE=const JAEGER_SAMPLER_PARAM=1 seldon-core-microservice --service-type MODEL MeanClassifier GRPC diff --git a/examples/models/mean_classifier/MeanClassifier.py b/examples/models/mean_classifier/MeanClassifier.py index f5cb2d379e..0c8ee78b75 100644 --- a/examples/models/mean_classifier/MeanClassifier.py +++ b/examples/models/mean_classifier/MeanClassifier.py @@ -10,10 +10,10 @@ def __init__(self, intValue=0): self.class_names = ["proba"] assert type(intValue) == int, "intValue parameters must be an integer" self.int_value = intValue - + print("Loading model here") - X = np.load(open("model.npy",'rb'), encoding='latin1') + X = np.load(open("model.npy",'rb'), encoding='latin1') self.threshold_ = X.mean() + self.int_value def _meaning(self, x): @@ -26,5 +26,10 @@ def predict(self, X, feature_names): return [[self._meaning(x)] for x in X] - + def health_status(self): + return {"status":"ok"} + + def metadata(self): + return {"metadata":{"modelName":"mean_classifier"}} + diff --git a/examples/models/metrics/metrics.ipynb b/examples/models/metrics/metrics.ipynb new file mode 100644 index 0000000000..7b50630342 --- /dev/null +++ b/examples/models/metrics/metrics.ipynb @@ -0,0 +1,383 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Basic Examples with Different Protocols Showing Metrics\n", + "\n", + "## Prerequisites\n", + "\n", + " * A kubernetes cluster with kubectl configured\n", + " * curl\n", + " * grpcurl\n", + " * 
pygmentize\n", + " \n", + "\n", + "## Setup Seldon Core\n", + "\n", + "Install Seldon Core as described in [docs](https://docs.seldon.io/projects/seldon-core/en/latest/workflow/install.html)\n", + "\n", + "Then port-forward to that ingress on localhost:8003 in a separate terminal either with:\n", + "\n", + " * Ambassador: \n", + " \n", + " ```bash\n", + " kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080\n", + " ```\n", + " * Istio: \n", + " \n", + " ```bash\n", + " kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80\n", + " ```\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl create namespace seldon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl config set-context $(kubectl config current-context) --namespace=seldon" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install Seldon Analytics" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!helm install seldon-core-analytics ../../../helm-charts/seldon-core-analytics \\\n", + " --set grafana_prom_admin_password=password \\\n", + " --set persistence.enabled=false \\\n", + " --namespace seldon-system" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Port forward to the Grafana dashboard\n", + "\n", + "```bash\n", + "kubectl port-forward $(kubectl get pods -n seldon-system -l app=grafana -o jsonpath='{.items[0].metadata.name}') 3000:3000 -n seldon-system\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%env RESOURCES=../../../notebooks/resources" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "## Seldon Protocol REST Model\n", + "\n", + "**Make sure your active namespace is seldon**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize ${RESOURCES}/model_seldon_rest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f ${RESOURCES}/model_seldon_rest.yaml -n seldon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=restseldon-rest-seldon \\\n", + " -o jsonpath='{.items[0].metadata.name}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!for i in `seq 1 60`; do \\\n", + " sleep 1 && curl -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' \\\n", + " -X POST http://localhost:8003/seldon/seldon/rest-seldon/api/v1.0/predictions \\\n", + " -H \"Content-Type: application/json\"; \\\n", + "done" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![dashboard](seldon-rest-dashboard.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f ${RESOURCES}/model_seldon_rest.yaml -n seldon" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Seldon Protocol GRPC Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize ${RESOURCES}/model_seldon_grpc.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f ${RESOURCES}/model_seldon_grpc.yaml -n seldon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl rollout 
status deploy/$(kubectl get deploy -l seldon-deployment-id=grpcseldon-grpc-seldon \\\n", + " -o jsonpath='{.items[0].metadata.name}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!cd ../../../executor/proto && for i in `seq 1 60`; do \\\n", + " sleep 1 && grpcurl -d '{\"data\":{\"ndarray\":[[1.0,2.0]]}}' \\\n", + " -rpc-header seldon:grpc-seldon -rpc-header namespace:seldon \\\n", + " -plaintext \\\n", + " -proto ./prediction.proto 0.0.0.0:8003 seldon.protos.Seldon/Predict; \\\n", + "done" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![dashboard](seldon-grpc-dashboard.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f ${RESOURCES}/model_seldon_grpc.yaml -n seldon" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Tensorflow Protocol REST Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize ${RESOURCES}/model_tfserving_rest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f ${RESOURCES}/model_tfserving_rest.yaml -n seldon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=resttfserving-rest-tfserving \\\n", + " -o jsonpath='{.items[0].metadata.name}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!for i in `seq 1 60`; do \\\n", + " sleep 1 && curl -d '{\"instances\": [1.0, 2.0, 5.0]}' \\\n", + " -X POST http://localhost:8003/seldon/seldon/rest-tfserving/v1/models/halfplustwo/:predict \\\n", + " -H \"Content-Type: application/json\"; \\\n", + "done" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "![dashboard](tfserving-rest-dashboard.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f ${RESOURCES}/model_tfserving_rest.yaml -n seldon" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Tensorflow Protocol GRPC Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize ${RESOURCES}/model_tfserving_grpc.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f ${RESOURCES}/model_tfserving_grpc.yaml -n seldon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=grpctfserving-grpc-tfserving \\\n", + " -o jsonpath='{.items[0].metadata.name}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!cd ../../../executor/proto && for i in `seq 1 60`; do \\\n", + " sleep 1 && grpcurl \\\n", + " -d '{\"model_spec\":{\"name\":\"halfplustwo\"},\"inputs\":{\"x\":{\"dtype\": 1, \"tensor_shape\": {\"dim\":[{\"size\": 3}]}, \"floatVal\" : [1.0, 2.0, 3.0]}}}' \\\n", + " -rpc-header seldon:grpc-tfserving -rpc-header namespace:seldon \\\n", + " -plaintext -proto ./prediction_service.proto \\\n", + " 0.0.0.0:8003 tensorflow.serving.PredictionService/Predict; \\\n", + "done" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![dashboard](tfserving-grpc-dashboard.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f ${RESOURCES}/model_tfserving_grpc.yaml -n seldon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + 
"source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/models/metrics/seldon-grpc-dashboard.png b/examples/models/metrics/seldon-grpc-dashboard.png new file mode 100644 index 0000000000..631e9d75fa Binary files /dev/null and b/examples/models/metrics/seldon-grpc-dashboard.png differ diff --git a/examples/models/metrics/seldon-rest-dashboard.png b/examples/models/metrics/seldon-rest-dashboard.png new file mode 100644 index 0000000000..b3290477db Binary files /dev/null and b/examples/models/metrics/seldon-rest-dashboard.png differ diff --git a/examples/models/metrics/tfserving-grpc-dashboard.png b/examples/models/metrics/tfserving-grpc-dashboard.png new file mode 100644 index 0000000000..f115ffad28 Binary files /dev/null and b/examples/models/metrics/tfserving-grpc-dashboard.png differ diff --git a/examples/models/metrics/tfserving-rest-dashboard.png b/examples/models/metrics/tfserving-rest-dashboard.png new file mode 100644 index 0000000000..df44c3b542 Binary files /dev/null and b/examples/models/metrics/tfserving-rest-dashboard.png differ diff --git a/examples/models/payload_logging/message-dumper.yaml b/examples/models/payload_logging/message-dumper.yaml new file mode 100644 index 0000000000..b2ed98ea24 --- /dev/null +++ b/examples/models/payload_logging/message-dumper.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: logger +spec: + selector: + matchLabels: + run: logger + replicas: 1 + template: + metadata: + labels: + run: logger + spec: + containers: + - name: logger + image: mendhak/http-https-echo + ports: + - containerPort: 80 +--- 
+apiVersion: v1 +kind: Service +metadata: + name: logger + labels: + run: logger +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + selector: + run: logger + + diff --git a/examples/models/payload_logging/model_logger.yaml b/examples/models/payload_logging/model_logger.yaml new file mode 100644 index 0000000000..445efdc881 --- /dev/null +++ b/examples/models/payload_logging/model_logger.yaml @@ -0,0 +1,24 @@ +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + name: model-logs +spec: + name: model-logs + predictors: + - componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier_rest:1.3 + name: classifier + imagePullPolicy: Always + graph: + children: [] + endpoint: + type: REST + name: classifier + type: MODEL + logger: + url: http://logger.seldon/ + mode: all + name: logging + replicas: 1 diff --git a/examples/models/payload_logging/payload_logging.ipynb b/examples/models/payload_logging/payload_logging.ipynb new file mode 100644 index 0000000000..71b579cf52 --- /dev/null +++ b/examples/models/payload_logging/payload_logging.ipynb @@ -0,0 +1,195 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Payload Logging \n", + "\n", + "An example of payload logging of Seldon Deployment requests and responses.\n", + "\n", + "## Prerequisites\n", + "\n", + " * A kubernetes cluster with kubectl configured\n", + " * curl\n", + " * grpcurl\n", + " * pygmentize\n", + " \n", + "\n", + "## Setup Seldon Core\n", + "\n", + "Install Seldon Core as described in [docs](https://docs.seldon.io/projects/seldon-core/en/latest/workflow/install.html)\n", + "\n", + "Then port-forward to that ingress on localhost:8003 in a separate terminal either with:\n", + "\n", + " * Ambassador: \n", + " \n", + " ```bash \n", + " kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080```\n", + " \n", + " * Istio: \n", + " 
\n", + " ```bash \n", + " kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80```\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl create namespace seldon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl config set-context $(kubectl config current-context) --namespace=seldon" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Deploy a Request Logger\n", + "\n", + "This will echo CloudEvents it receives.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize message-dumper.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f message-dumper.yaml -n seldon" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create a Model with Logging" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize model_logger.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f model_logger.yaml -n seldon" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Send a Prediction Request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!curl -v -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' \\\n", + " -X POST http://localhost:8003/seldon/seldon/model-logs/api/v1.0/predictions \\\n", + " -H \"Content-Type: application/json\";" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Check Logger" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "!kubectl logs $(kubectl get pods -l run=logger -n seldon -o jsonpath='{.items[0].metadata.name}') " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Clean Up" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f model_logger.yaml -n seldon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f message-dumper.yaml -n seldon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/models/template_model_tracing/contract.json b/examples/models/template_model_tracing/contract.json deleted file mode 100644 index b1e829f44d..0000000000 --- a/examples/models/template_model_tracing/contract.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "features":[ - { - "name":"feat", - "dtype":"FLOAT", - "ftype":"continuous", - "range":["inf","inf"], - "repeat":3 - } - ], - "targets":[ - { - "name":"proba", - "dtype":"FLOAT", - "ftype":"continuous", - "values":[0,1] - } - ] -} diff --git a/examples/models/template_model_tracing/deployment_grpc.json b/examples/models/template_model_tracing/deployment_grpc.json deleted file mode 100644 index 15459ca42e..0000000000 --- a/examples/models/template_model_tracing/deployment_grpc.json +++ /dev/null @@ -1,94 +0,0 @@ -{ - "apiVersion": "machinelearning.seldon.io/v1alpha2", - "kind": "SeldonDeployment", - "metadata": { - 
"labels": { - "app": "seldon" - }, - "name": "tracing-example", - "namespace": "seldon" - }, - "spec": { - "name": "tracing-example", - "oauth_key": "oauth-key", - "oauth_secret": "oauth-secret", - "predictors": [ - { - "componentSpecs": [{ - "spec": { - "containers": [ - { - "name": "model1", - "image": "seldonio/mock_classifier_grpc:1.1", - "env": [ - { - "name": "TRACING", - "value": "1" - }, - { - "name": "JAEGER_CONFIG_PATH", - "value": "/etc/tracing/config/tracing.yml" - } - ], - "volumeMounts": [ - { - "mountPath": "/etc/tracing/config", - "name": "tracing-config" - } - ] - } - ], - "terminationGracePeriodSeconds": 1, - "volumes": [ - { - "name": "tracing-config", - "configMap": { - "name": "tracing-config", - "items": [ - { - "key": "tracing.yml", - "path": "tracing.yml" - } - ] - } - } - ] - } - }], - "graph": { - "name": "model1", - "endpoint": { "type" : "GRPC" }, - "type": "MODEL", - "children": [ - ] - }, - "name": "tracing", - "replicas": 1, - "svcOrchSpec" : { - "env": [ - { - "name": "TRACING", - "value": "1" - }, - { - "name": "JAEGER_AGENT_HOST", - "value": "jaeger-agent" - }, - { - "name": "JAEGER_AGENT_PORT", - "value": "5775" - }, - { - "name": "JAEGER_SAMPLER_TYPE", - "value": "const" - }, - { - "name": "JAEGER_SAMPLER_PARAM", - "value": "1" - } - ] - } - } - ] - } -} diff --git a/examples/models/template_model_tracing/deployment_rest.json b/examples/models/template_model_tracing/deployment_rest.json deleted file mode 100644 index a179e3e17d..0000000000 --- a/examples/models/template_model_tracing/deployment_rest.json +++ /dev/null @@ -1,94 +0,0 @@ -{ - "apiVersion": "machinelearning.seldon.io/v1alpha2", - "kind": "SeldonDeployment", - "metadata": { - "labels": { - "app": "seldon" - }, - "name": "tracing-example", - "namespace": "seldon" - }, - "spec": { - "name": "tracing-example", - "oauth_key": "oauth-key", - "oauth_secret": "oauth-secret", - "predictors": [ - { - "componentSpecs": [{ - "spec": { - "containers": [ - { - "name": "model1", - 
"image": "seldonio/mock_classifier_rest:1.1", - "env": [ - { - "name": "TRACING", - "value": "1" - }, - { - "name": "JAEGER_CONFIG_PATH", - "value": "/etc/tracing/config/tracing.yml" - } - ], - "volumeMounts": [ - { - "mountPath": "/etc/tracing/config", - "name": "tracing-config" - } - ] - } - ], - "terminationGracePeriodSeconds": 1, - "volumes": [ - { - "name": "tracing-config", - "configMap": { - "name": "tracing-config", - "items": [ - { - "key": "tracing.yml", - "path": "tracing.yml" - } - ] - } - } - ] - } - }], - "graph": { - "name": "model1", - "endpoint": { "type" : "REST" }, - "type": "MODEL", - "children": [ - ] - }, - "name": "tracing", - "replicas": 1, - "svcOrchSpec" : { - "env": [ - { - "name": "TRACING", - "value": "1" - }, - { - "name": "JAEGER_AGENT_HOST", - "value": "jaeger-agent" - }, - { - "name": "JAEGER_AGENT_PORT", - "value": "5775" - }, - { - "name": "JAEGER_SAMPLER_TYPE", - "value": "const" - }, - { - "name": "JAEGER_SAMPLER_PARAM", - "value": "1" - } - ] - } - } - ] - } -} diff --git a/examples/models/template_model_tracing/jaeger-ui-grpc-example.png b/examples/models/template_model_tracing/jaeger-ui-grpc-example.png deleted file mode 100644 index 7bc6c0f3e5..0000000000 Binary files a/examples/models/template_model_tracing/jaeger-ui-grpc-example.png and /dev/null differ diff --git a/examples/models/template_model_tracing/jaeger-ui-rest-example.png b/examples/models/template_model_tracing/jaeger-ui-rest-example.png deleted file mode 100644 index 7b4b2823fe..0000000000 Binary files a/examples/models/template_model_tracing/jaeger-ui-rest-example.png and /dev/null differ diff --git a/examples/models/template_model_tracing/tracing-configmap.yaml b/examples/models/template_model_tracing/tracing-configmap.yaml deleted file mode 100644 index b626c7c1d5..0000000000 --- a/examples/models/template_model_tracing/tracing-configmap.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: tracing-config -data: - 
tracing.yml: | - sampler: - type: const - param: 1 - local_agent: - reporting_host: jaeger-agent - reporting_port: 5775 - logging: true diff --git a/examples/models/template_model_tracing/tracing.ipynb b/examples/models/template_model_tracing/tracing.ipynb deleted file mode 100644 index 0e6762e0a5..0000000000 --- a/examples/models/template_model_tracing/tracing.ipynb +++ /dev/null @@ -1,541 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Distributed Tracing Template\n", - "\n", - " Illustrate the configuration for allowing distributed tracing using Jaeger.\n", - " \n", - "## Dependencies\n", - "\n", - " * [Helm](https://github.com/kubernetes/helm)\n", - " * [Minikube](https://github.com/kubernetes/minikube)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Test using Minikube\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ˜„ minikube v1.0.1 on linux (amd64)\n", - "๐Ÿคน Downloading Kubernetes v1.14.1 images in the background ...\n", - "๐Ÿ”ฅ Creating kvm2 VM (CPUs=2, Memory=4096MB, Disk=20000MB) ...\n", - "๐Ÿ“ถ \"minikube\" IP address is 192.168.39.190\n", - "๐Ÿณ Configuring Docker as the container runtime ...\n", - "๐Ÿณ Version of container runtime is 18.06.3-ce\n", - "โŒ› Waiting for image downloads to complete ...\n", - "โœจ Preparing Kubernetes environment ...\n", - "๐Ÿšœ Pulling images required by Kubernetes v1.14.1 ...\n", - "๐Ÿš€ Launching Kubernetes v1.14.1 using kubeadm ... \n", - "โŒ› Waiting for pods: apiserver proxy etcd scheduler controller dns\n", - "๐Ÿ”‘ Configuring cluster permissions ...\n", - "๐Ÿค” Verifying component health .....\n", - "๐Ÿ’— kubectl is now configured to use \"minikube\"\n", - "๐Ÿ„ Done! 
Thank you for using minikube!\n" - ] - } - ], - "source": [ - "!minikube start --memory 4096" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setup Seldon Core\n", - "\n", - "Use the setup notebook to [Setup Cluster](../../seldon_core_setup.ipynb#Setup-Cluster) with [Ambassador Ingress](../../seldon_core_setup.ipynb#Ambassador) and [Install Seldon Core](../../seldon_core_setup.ipynb#Install-Seldon-Core). Instructions [also online](./seldon_core_setup.html)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Install Jaeger\n", - "\n", - "We will use the Jaeger All-in-1 resource found at the [Jaeger Kubernetes repo](https://github.com/jaegertracing/jaeger-kubernetes)." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "deployment.extensions/jaeger created\n", - "service/jaeger-query created\n", - "service/jaeger-collector created\n", - "service/jaeger-agent created\n", - "service/zipkin created\n" - ] - } - ], - "source": [ - "!kubectl create -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml -n seldon" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Start Jaeger UI\n", - "\n", - "```\n", - "minikube service jaeger-query -n seldon\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create Jaeger ConfigMap" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34;01mapiVersion\u001b[39;49;00m: v1\r\n", - "\u001b[34;01mkind\u001b[39;49;00m: ConfigMap\r\n", - "\u001b[34;01mmetadata\u001b[39;49;00m:\r\n", - " \u001b[34;01mname\u001b[39;49;00m: tracing-config\r\n", - "\u001b[34;01mdata\u001b[39;49;00m:\r\n", - " \u001b[34;01mtracing.yml\u001b[39;49;00m: |\r\n", - 
" \u001b[31msampler:\u001b[39;49;00m\r\n", - " \u001b[31mtype: const\u001b[39;49;00m\r\n", - " \u001b[31mparam: 1\u001b[39;49;00m\r\n", - " \u001b[31mlocal_agent:\u001b[39;49;00m\r\n", - " \u001b[31mreporting_host: jaeger-agent\u001b[39;49;00m\r\n", - " \u001b[31mreporting_port: 5775\u001b[39;49;00m\r\n", - " \u001b[31mlogging: true\u001b[39;49;00m\r\n" - ] - } - ], - "source": [ - "!pygmentize tracing-configmap.yaml" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "configmap/tracing-config created\r\n" - ] - } - ], - "source": [ - "!kubectl apply -f tracing-configmap.yaml -n seldon" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Run Example REST Deployment" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\r\n", - " \u001b[34;01m\"apiVersion\"\u001b[39;49;00m: \u001b[33m\"machinelearning.seldon.io/v1alpha2\"\u001b[39;49;00m,\r\n", - " \u001b[34;01m\"kind\"\u001b[39;49;00m: \u001b[33m\"SeldonDeployment\"\u001b[39;49;00m,\r\n", - " \u001b[34;01m\"metadata\"\u001b[39;49;00m: {\r\n", - " \u001b[34;01m\"labels\"\u001b[39;49;00m: {\r\n", - " \u001b[34;01m\"app\"\u001b[39;49;00m: \u001b[33m\"seldon\"\u001b[39;49;00m\r\n", - " },\r\n", - " \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"tracing-example\"\u001b[39;49;00m,\r\n", - "\t\u001b[34;01m\"namespace\"\u001b[39;49;00m: \u001b[33m\"seldon\"\u001b[39;49;00m\t\r\n", - " },\r\n", - " \u001b[34;01m\"spec\"\u001b[39;49;00m: {\r\n", - " \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"tracing-example\"\u001b[39;49;00m,\r\n", - " \u001b[34;01m\"oauth_key\"\u001b[39;49;00m: \u001b[33m\"oauth-key\"\u001b[39;49;00m,\r\n", - " \u001b[34;01m\"oauth_secret\"\u001b[39;49;00m: \u001b[33m\"oauth-secret\"\u001b[39;49;00m,\r\n", - " \u001b[34;01m\"predictors\"\u001b[39;49;00m: 
[\r\n", - " {\r\n", - " \u001b[34;01m\"componentSpecs\"\u001b[39;49;00m: [{\r\n", - " \u001b[34;01m\"spec\"\u001b[39;49;00m: {\r\n", - " \u001b[34;01m\"containers\"\u001b[39;49;00m: [\r\n", - " {\r\n", - " \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"model1\"\u001b[39;49;00m,\t\t\t\t\r\n", - " \u001b[34;01m\"image\"\u001b[39;49;00m: \u001b[33m\"seldonio/mock_classifier_rest:1.1\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[34;01m\"env\"\u001b[39;49;00m: [\r\n", - "\t\t\t\t {\r\n", - "\t\t\t\t\t\u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"TRACING\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\t\u001b[34;01m\"value\"\u001b[39;49;00m: \u001b[33m\"1\"\u001b[39;49;00m\r\n", - "\t\t\t\t },\r\n", - "\t\t\t\t {\r\n", - "\t\t\t\t\t\u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"JAEGER_CONFIG_PATH\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\t\u001b[34;01m\"value\"\u001b[39;49;00m: \u001b[33m\"/etc/tracing/config/tracing.yml\"\u001b[39;49;00m\r\n", - "\t\t\t\t }\r\n", - "\t\t\t\t],\r\n", - "\t\t\t\t\u001b[34;01m\"volumeMounts\"\u001b[39;49;00m: [\r\n", - "\t\t\t\t {\r\n", - "\t\t\t\t\t\u001b[34;01m\"mountPath\"\u001b[39;49;00m: \u001b[33m\"/etc/tracing/config\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\t\u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"tracing-config\"\u001b[39;49;00m\r\n", - "\t\t\t\t }\r\n", - "\t\t\t\t]\r\n", - " }\r\n", - "\t\t\t],\r\n", - "\t\t\t\u001b[34;01m\"terminationGracePeriodSeconds\"\u001b[39;49;00m: \u001b[34m1\u001b[39;49;00m,\r\n", - "\t\t\t\u001b[34;01m\"volumes\"\u001b[39;49;00m: [\r\n", - "\t\t\t {\r\n", - "\t\t\t\t\u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"tracing-config\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[34;01m\"configMap\"\u001b[39;49;00m: {\r\n", - "\t\t\t\t \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"tracing-config\"\u001b[39;49;00m,\r\n", - "\t\t\t\t \u001b[34;01m\"items\"\u001b[39;49;00m: [\r\n", - "\t\t\t\t\t{\r\n", - "\t\t\t\t\t \u001b[34;01m\"key\"\u001b[39;49;00m: \u001b[33m\"tracing.yml\"\u001b[39;49;00m,\r\n", - 
"\t\t\t\t\t \u001b[34;01m\"path\"\u001b[39;49;00m: \u001b[33m\"tracing.yml\"\u001b[39;49;00m\r\n", - "\t\t\t\t\t}\r\n", - "\t\t\t\t ]\r\n", - "\t\t\t\t}\r\n", - "\t\t\t }\r\n", - "\t\t\t]\r\n", - "\t\t }\r\n", - "\t\t}],\r\n", - " \u001b[34;01m\"graph\"\u001b[39;49;00m: {\r\n", - "\t\t \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"model1\"\u001b[39;49;00m,\r\n", - "\t\t \u001b[34;01m\"endpoint\"\u001b[39;49;00m: { \u001b[34;01m\"type\"\u001b[39;49;00m : \u001b[33m\"REST\"\u001b[39;49;00m },\r\n", - "\t\t \u001b[34;01m\"type\"\u001b[39;49;00m: \u001b[33m\"MODEL\"\u001b[39;49;00m,\r\n", - "\t\t \u001b[34;01m\"children\"\u001b[39;49;00m: [\r\n", - "\t\t ]\r\n", - "\t\t},\r\n", - " \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"tracing\"\u001b[39;49;00m,\r\n", - " \u001b[34;01m\"replicas\"\u001b[39;49;00m: \u001b[34m1\u001b[39;49;00m,\r\n", - "\t\t\u001b[34;01m\"svcOrchSpec\"\u001b[39;49;00m : {\r\n", - "\t\t \u001b[34;01m\"env\"\u001b[39;49;00m: [\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"TRACING\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[34;01m\"value\"\u001b[39;49;00m: \u001b[33m\"1\"\u001b[39;49;00m\r\n", - "\t\t\t},\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"JAEGER_AGENT_HOST\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[34;01m\"value\"\u001b[39;49;00m: \u001b[33m\"jaeger-agent\"\u001b[39;49;00m\r\n", - "\t\t\t},\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"JAEGER_AGENT_PORT\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[34;01m\"value\"\u001b[39;49;00m: \u001b[33m\"5775\"\u001b[39;49;00m\r\n", - "\t\t\t},\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"JAEGER_SAMPLER_TYPE\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[34;01m\"value\"\u001b[39;49;00m: \u001b[33m\"const\"\u001b[39;49;00m\r\n", - "\t\t\t},\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[34;01m\"name\"\u001b[39;49;00m: 
\u001b[33m\"JAEGER_SAMPLER_PARAM\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[34;01m\"value\"\u001b[39;49;00m: \u001b[33m\"1\"\u001b[39;49;00m\r\n", - "\t\t\t}\r\n", - "\t\t ]\t\t\t\t\r\n", - "\t\t}\r\n", - " }\r\n", - " ]\r\n", - " }\r\n", - "}\r\n" - ] - } - ], - "source": [ - "!pygmentize deployment_rest.json" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io/tracing-example created\r\n" - ] - } - ], - "source": [ - "!kubectl create -f deployment_rest.json" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "deployment \"tracing-example-tracing-535f3a8\" successfully rolled out\r\n" - ] - } - ], - "source": [ - "!kubectl rollout status deployment/tracing-example-tracing-535f3a8" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "----------------------------------------\n", - "SENDING NEW REQUEST:\n", - "\n", - "[[-2.146 0.92 -0.593]]\n", - "RECEIVED RESPONSE:\n", - "meta {\n", - " puid: \"o57qhtk6j6lql4rnl125j0fik3\"\n", - " requestPath {\n", - " key: \"model1\"\n", - " value: \"seldonio/mock_classifier_rest:1.1\"\n", - " }\n", - "}\n", - "data {\n", - " names: \"proba\"\n", - " ndarray {\n", - " values {\n", - " list_value {\n", - " values {\n", - " number_value: 0.028664848186919598\n", - " }\n", - " }\n", - " }\n", - " }\n", - "}\n", - "\n", - "\n" - ] - } - ], - "source": [ - "!seldon-core-api-tester contract.json `minikube ip` `kubectl get svc ambassador -o jsonpath='{.spec.ports[0].nodePort}'` \\\n", - " tracing-example --namespace seldon -p" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Check the Jaeger UI. 
You should be able to find traces like below:\n", - "\n", - "![rest](jaeger-ui-rest-example.png)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io \"tracing-example\" deleted\r\n" - ] - } - ], - "source": [ - "!kubectl delete -f deployment_rest.json" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Run Example GRPC Deployment" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io/tracing-example created\r\n" - ] - } - ], - "source": [ - "!kubectl create -f deployment_grpc.json" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Waiting for deployment \"tracing-example-tracing-d240ae0\" rollout to finish: 0 of 1 updated replicas are available...\n", - "deployment \"tracing-example-tracing-d240ae0\" successfully rolled out\n" - ] - } - ], - "source": [ - "!kubectl rollout status deployment/tracing-example-tracing-d240ae0" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "----------------------------------------\r\n", - "SENDING NEW REQUEST:\r\n", - "\r\n", - "[[ 0.643 -1.813 1.34 ]]\r\n", - "RECEIVED RESPONSE:\r\n", - "meta {\r\n", - " puid: \"i57ndibn2lo6coa98bsesvbs2t\"\r\n", - " requestPath {\r\n", - " key: \"model1\"\r\n", - " value: \"seldonio/mock_classifier_grpc:1.1\"\r\n", - " }\r\n", - "}\r\n", - "data {\r\n", - " names: \"proba\"\r\n", - " ndarray {\r\n", - " values {\r\n", - " list_value {\r\n", - " values {\r\n", - " number_value: 0.05416670048123607\r\n", - " }\r\n", - " }\r\n", - " }\r\n", - " }\r\n", - "}\r\n", - 
"\r\n", - "\r\n" - ] - } - ], - "source": [ - "!seldon-core-api-tester contract.json `minikube ip` `kubectl get svc ambassador -o jsonpath='{.spec.ports[0].nodePort}'` \\\n", - " tracing-example --namespace seldon -p --grpc" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Check the Jaeger UI. You should be able to find traces like below:\n", - "\n", - "\n", - "![grpc](jaeger-ui-grpc-example.png)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io \"tracing-example\" deleted\r\n" - ] - } - ], - "source": [ - "!kubectl delete -f deployment_grpc.json" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5" - }, - "varInspector": { - "cols": { - "lenName": 16, - "lenType": 16, - "lenVar": 40 - }, - "kernels_config": { - "python": { - "delete_cmd_postfix": "", - "delete_cmd_prefix": "del ", - "library": "var_list.py", - "varRefreshCmd": "print(var_dic_list())" - }, - "r": { - "delete_cmd_postfix": ") ", - "delete_cmd_prefix": "rm(", - "library": "var_list.r", - "varRefreshCmd": "cat(var_dic_list()) " - } - }, - "types_to_exclude": [ - "module", - "function", - "builtin_function_or_method", - "instance", - "_Feature" - ], - "window_display": false - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/examples/models/tracing/deployment_grpc.yaml b/examples/models/tracing/deployment_grpc.yaml new file mode 100644 index 0000000000..075654f43c --- /dev/null +++ b/examples/models/tracing/deployment_grpc.yaml @@ -0,0 +1,49 @@ +apiVersion: machinelearning.seldon.io/v1 +kind: 
SeldonDeployment +metadata: + name: tracing-example + namespace: seldon +spec: + name: tracing-example + predictors: + - componentSpecs: + - spec: + containers: + - env: + - name: TRACING + value: '1' + - name: JAEGER_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: JAEGER_AGENT_PORT + value: '5775' + - name: JAEGER_SAMPLER_TYPE + value: const + - name: JAEGER_SAMPLER_PARAM + value: '1' + image: seldonio/mock_classifier_grpc:1.3 + name: model1 + terminationGracePeriodSeconds: 1 + graph: + children: [] + endpoint: + type: GRPC + name: model1 + type: MODEL + name: tracing + replicas: 1 + svcOrchSpec: + env: + - name: TRACING + value: '1' + - name: JAEGER_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: JAEGER_AGENT_PORT + value: '5775' + - name: JAEGER_SAMPLER_TYPE + value: const + - name: JAEGER_SAMPLER_PARAM + value: '1' diff --git a/examples/models/tracing/deployment_rest.yaml b/examples/models/tracing/deployment_rest.yaml new file mode 100644 index 0000000000..e8ceb70e56 --- /dev/null +++ b/examples/models/tracing/deployment_rest.yaml @@ -0,0 +1,49 @@ +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + name: tracing-example + namespace: seldon +spec: + name: tracing-example + predictors: + - componentSpecs: + - spec: + containers: + - env: + - name: TRACING + value: '1' + - name: JAEGER_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: JAEGER_AGENT_PORT + value: '5775' + - name: JAEGER_SAMPLER_TYPE + value: const + - name: JAEGER_SAMPLER_PARAM + value: '1' + image: seldonio/mock_classifier_rest:1.3 + name: model1 + terminationGracePeriodSeconds: 1 + graph: + children: [] + endpoint: + type: REST + name: model1 + type: MODEL + name: tracing + replicas: 1 + svcOrchSpec: + env: + - name: TRACING + value: '1' + - name: JAEGER_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: JAEGER_AGENT_PORT + value: '5775' + - name: JAEGER_SAMPLER_TYPE + value: 
const + - name: JAEGER_SAMPLER_PARAM + value: '1' diff --git a/examples/models/tracing/jaeger-ui-grpc-example.png b/examples/models/tracing/jaeger-ui-grpc-example.png new file mode 100644 index 0000000000..f616b92cf5 Binary files /dev/null and b/examples/models/tracing/jaeger-ui-grpc-example.png differ diff --git a/examples/models/tracing/jaeger-ui-rest-example.png b/examples/models/tracing/jaeger-ui-rest-example.png new file mode 100644 index 0000000000..775074a85a Binary files /dev/null and b/examples/models/tracing/jaeger-ui-rest-example.png differ diff --git a/examples/models/tracing/simplest.yaml b/examples/models/tracing/simplest.yaml new file mode 100644 index 0000000000..2962201b69 --- /dev/null +++ b/examples/models/tracing/simplest.yaml @@ -0,0 +1,7 @@ +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: simplest +spec: + agent: + strategy: DaemonSet diff --git a/examples/models/tracing/tracing.ipynb b/examples/models/tracing/tracing.ipynb new file mode 100644 index 0000000000..dadd27065a --- /dev/null +++ b/examples/models/tracing/tracing.ipynb @@ -0,0 +1,274 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Distributed Tracing Template\n", + "\n", + " Illustrate the configuration for allowing distributed tracing using Jaeger.\n", + " \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup Seldon Core\n", + "\n", + "Install Seldon Core as described in [docs](https://docs.seldon.io/projects/seldon-core/en/latest/workflow/install.html)\n", + "\n", + "Then port-forward to that ingress on localhost:8003 in a separate terminal either with:\n", + "\n", + " * Ambassador: \n", + " \n", + " ```kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080```\n", + " \n", + " * Istio: \n", + " \n", + " ```kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o 
jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl create namespace seldon" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl config set-context $(kubectl config current-context) --namespace=seldon" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install Jaeger\n", + "\n", + "Follow the Jaeger docs to [install on Kubernetes](https://www.jaegertracing.io/docs/1.16/operator/)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize simplest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f simplest.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Port forward to Jaeger UI\n", + "\n", + "```bash\n", + "kubectl port-forward $(kubectl get pods -l app.kubernetes.io/name=simplest -n seldon -o jsonpath='{.items[0].metadata.name}') 16686:16686 -n seldon\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run Example REST Deployment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize deployment_rest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl create -f deployment_rest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl rollout status deployment/tracing-example-tracing-9e7c0d8" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!curl -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' \\\n", + " -X POST 
http://localhost:8003/seldon/seldon/tracing-example/api/v1.0/predictions \\\n", + " -H \"Content-Type: application/json\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Check the Jaeger UI. You should be able to find traces like below:\n", + "\n", + "![rest](jaeger-ui-rest-example.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f deployment_rest.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run Example GRPC Deployment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize deployment_grpc.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl create -f deployment_grpc.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl rollout status deployment/tracing-example-tracing-4276fb6" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!cd ../../../executor/proto && grpcurl -d '{\"data\":{\"ndarray\":[[1.0,2.0]]}}' \\\n", + " -rpc-header seldon:tracing-example -rpc-header namespace:seldon \\\n", + " -plaintext \\\n", + " -proto ./prediction.proto 0.0.0.0:8003 seldon.protos.Seldon/Predict" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Check the Jaeger UI. 
You should be able to find traces like below:\n", + "\n", + "\n", + "![grpc](jaeger-ui-grpc-example.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f deployment_grpc.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + }, + "varInspector": { + "cols": { + "lenName": 16, + "lenType": 16, + "lenVar": 40 + }, + "kernels_config": { + "python": { + "delete_cmd_postfix": "", + "delete_cmd_prefix": "del ", + "library": "var_list.py", + "varRefreshCmd": "print(var_dic_list())" + }, + "r": { + "delete_cmd_postfix": ") ", + "delete_cmd_prefix": "rm(", + "library": "var_list.r", + "varRefreshCmd": "cat(var_dic_list()) " + } + }, + "types_to_exclude": [ + "module", + "function", + "builtin_function_or_method", + "instance", + "_Feature" + ], + "window_display": false + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/executor/.gitignore b/executor/.gitignore new file mode 100644 index 0000000000..b394b3ec4b --- /dev/null +++ b/executor/.gitignore @@ -0,0 +1,8 @@ +executor +./vendor/ +./tensorflow/ +./serving/ +./bin/ +cover.out +executor.tar + \ No newline at end of file diff --git a/executor/Dockerfile b/executor/Dockerfile new file mode 100644 index 0000000000..13f2d7f6d6 --- /dev/null +++ b/executor/Dockerfile @@ -0,0 +1,28 @@ +# Build the manager binary +FROM golang:1.13 as builder + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +COPY proto/ proto/ +# cache deps before building and copying 
source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY main.go main.go +COPY api/ api/ +COPY predictor/ predictor/ +COPY logger/ logger/ + +# Build +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o executor main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:latest +WORKDIR / +COPY --from=builder /workspace/executor . +COPY licenses/license.txt . +ENTRYPOINT ["/executor"] diff --git a/executor/Makefile b/executor/Makefile new file mode 100644 index 0000000000..cebeab6ec1 --- /dev/null +++ b/executor/Makefile @@ -0,0 +1,77 @@ +VERSION := $(shell cat ../version.txt) +# Image URL to use all building/pushing image targets +IMG ?= seldonio/seldon-core-executor:${VERSION} + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# Run go fmt against code +fmt: + go fmt ./... + +# Run go vet against code +vet: + go vet ./... + + +# Build manager binary +executor: fmt vet + go build -o executor main.go + + +.PHONY: copy_protos +copy_protos: + cp -r ../proto/tensorflow/tensorflow/** proto/tensorflow + + +.PHONY: compile_seldon_proto +compile_seldon_proto: + cp ../proto/prediction.proto api/grpc + cd api/grpc && protoc -I. -I${GOPATH}/src/github.com/tensorflow/tensorflow --go_out=paths=source_relative,plugins=grpc:. 
prediction.proto + rm api/grpc/prediction.proto + +# https://github.com/tensorflow/serving/issues/1365#issuecomment-525351995 +.PHONY: compile_tensorflow_proto +compile_tensorflow_proto: + git clone -b r1.15 https://github.com/tensorflow/tensorflow.git + git clone -b r1.14 https://github.com/tensorflow/serving.git + go run protoc.go + go mod edit -replace=github.com/tensorflow/tensorflow/tensorflow/go/core=./proto/tensorflow/core + cd proto/tensorflow/core && go mod init github.com/tensorflow/tensorflow/tensorflow/go/core && cd - + go build ./proto/tensorflow/serving + +.PHONY: add_protos +add_protos: + cd tensorflow && find ./tensorflow -name '*.proto' | cpio -pdm ../proto + cd serving && find ./tensorflow_serving -name '*.proto' | cpio -pdm ../proto + +# Run tests +test: fmt vet + go test ./api/... ./predictor/... -coverprofile cover.out + +# Build the docker image +docker-build: test + docker build . -t ${IMG} + +# Push the docker image +docker-push: + docker push ${IMG} + + +kind-image-install: docker-build + kind load -v 3 docker-image ${IMG} + + +.PHONY: clean +clean: + rm -rf vendor + rm -rf tensorflow + rm -rf serving + +licenses/dep.txt: + go list -m all | cut -d ' ' -f 1 > licenses/dep.txt + diff --git a/executor/README.md b/executor/README.md new file mode 100644 index 0000000000..2888c1213f --- /dev/null +++ b/executor/README.md @@ -0,0 +1,57 @@ +# Seldon Executor + +This Go project replaces the Seldon Java Engine. It is presently in development. + +** Do not use in production ** + + +## Functionality + +The focus is to provide a smaller more efficient graph orchestror. + + * REST and gRPC for Seldon and Tensorflow protocols. Easily extendable to other protocols. + * Logging of request and or response payloads to arbitrary URLs with CloudEvents + * Tracing for REST and gRPC + * Prometheus metrics for REST and gRPC + +Changes to existing service orchestrator + + * All components must be REST or gRPC in agraph. No mixing. 
+ * Not meta data additions to payloads are carried out by the executor. + + +## Testing + +You can choose to use this executor by adding the annotation `seldon.io/executor: "true"`. This annotation will be active until this project progresses from incubating status. + +An example is shown below: + +```JSON +apiVersion: machinelearning.seldon.io/v1alpha2 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier_rest:1.3 + name: classifier + graph: + children: [] + endpoint: + type: REST + name: classifier + type: MODEL + labels: + version: v1 + name: example + replicas: 1 + +``` diff --git a/executor/api/client/client.go b/executor/api/client/client.go new file mode 100644 index 0000000000..f85666848b --- /dev/null +++ b/executor/api/client/client.go @@ -0,0 +1,55 @@ +package client + +import ( + "context" + "fmt" + "github.com/seldonio/seldon-core/executor/api/payload" + "golang.org/x/xerrors" + "io" +) + +const ( + SeldonPredictPath = "/predict" + SeldonTransformInputPath = "/transform-input" + SeldonTransformOutputPath = "/transform-output" + SeldonCombinePath = "/aggregate" + SeldonRoutePath = "/route" + SeldonFeedbackPath = "/send-feedback" + SeldonStatusPath = "/health/status" + SeldonMetadataPath = "/metadata" +) + +type SeldonApiClient interface { + Predict(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) + TransformInput(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) + Route(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (int, error) + Combine(ctx context.Context, modelName string, host string, port 
int32, msgs []payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) + TransformOutput(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) + Feedback(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) + Chain(ctx context.Context, modelName string, msg payload.SeldonPayload) (payload.SeldonPayload, error) + Status(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) + Metadata(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) + Unmarshall(msg []byte) (payload.SeldonPayload, error) + Marshall(out io.Writer, msg payload.SeldonPayload) error + CreateErrorPayload(err error) payload.SeldonPayload +} + +type SeldonApiError struct { + Message string + Code int + frame xerrors.Frame +} + +func (se SeldonApiError) FormatError(p xerrors.Printer) error { + p.Printf("%d %s", se.Code, se.Message) + se.frame.Format(p) + return nil +} + +func (se SeldonApiError) Format(f fmt.State, c rune) { + xerrors.FormatError(se, f, c) +} + +func (se SeldonApiError) Error() string { + return fmt.Sprint(se) +} diff --git a/executor/api/client/seldondeployment_client.go b/executor/api/client/seldondeployment_client.go new file mode 100644 index 0000000000..c34b01a5bd --- /dev/null +++ b/executor/api/client/seldondeployment_client.go @@ -0,0 +1,67 @@ +package client + +import ( + "fmt" + "github.com/go-logr/logr" + "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + clientset "github.com/seldonio/seldon-core/operator/client/machinelearning/v1/clientset/versioned/typed/machinelearning/v1" + v1meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + 
"k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/homedir" + "path/filepath" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" +) + +type SeldonDeploymentClient struct { + client *clientset.MachinelearningV1Client + Log logr.Logger +} + +func NewSeldonDeploymentClient(path *string) *SeldonDeploymentClient { + + var config *rest.Config + var err error + + if path != nil && *path != "" { + config, err = clientcmd.BuildConfigFromFlags("", *path) + if err != nil { + panic(err.Error()) + } + } else { + config, err = rest.InClusterConfig() + if err != nil { + if home := homedir.HomeDir(); home != "" { + homepath := filepath.Join(home, ".kube", "config") + config, err = clientcmd.BuildConfigFromFlags("", homepath) + if err != nil { + panic(err.Error()) + } + } + } + } + + kubeClientset, err := clientset.NewForConfig(config) + if err != nil { + panic(err.Error()) + } + + return &SeldonDeploymentClient{ + kubeClientset, + logf.Log.WithName("SeldonRestApi"), + } +} + +func (sd *SeldonDeploymentClient) GetPredictor(sdepName string, namespace string, predictorName string) (*v1.PredictorSpec, error) { + sdep, err := sd.client.SeldonDeployments(namespace).Get(sdepName, v1meta.GetOptions{}) + if err != nil { + return nil, err + } + for _, predictor := range sdep.Spec.Predictors { + if predictor.Name == predictorName { + return &predictor, nil + } + } + + return nil, fmt.Errorf("Failed to find predictor with name %s", predictorName) +} diff --git a/executor/api/constants.go b/executor/api/constants.go new file mode 100644 index 0000000000..a8255e1a55 --- /dev/null +++ b/executor/api/constants.go @@ -0,0 +1,4 @@ +package api + +const ProtocolSeldon = "seldon" +const ProtocolTensorflow = "tensorflow" diff --git a/executor/api/grpc/client.go b/executor/api/grpc/client.go new file mode 100644 index 0000000000..85359a303f --- /dev/null +++ b/executor/api/grpc/client.go @@ -0,0 +1,15 @@ +package grpc + +import ( + "context" + "google.golang.org/grpc/metadata" +) + +func 
AddMetadataToOutgoingGrpcContext(ctx context.Context, meta map[string][]string) context.Context { + for k, vv := range meta { + for _, v := range vv { + ctx = metadata.AppendToOutgoingContext(ctx, k, v) + } + } + return ctx +} diff --git a/executor/api/grpc/client_test.go b/executor/api/grpc/client_test.go new file mode 100644 index 0000000000..9eeae31cfa --- /dev/null +++ b/executor/api/grpc/client_test.go @@ -0,0 +1,23 @@ +package grpc + +import ( + "context" + . "github.com/onsi/gomega" + "github.com/seldonio/seldon-core/executor/api/payload" + "google.golang.org/grpc/metadata" + "testing" +) + +func TestAddPuidToCtx(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + ctx := context.Background() + meta := CollectMetadata(ctx) + ctx = AddMetadataToOutgoingGrpcContext(ctx, meta) + + md, ok := metadata.FromOutgoingContext(ctx) + g.Expect(ok).To(BeTrue()) + g.Expect(md.Get(payload.SeldonPUIDHeader)).NotTo(BeNil()) + +} diff --git a/executor/api/grpc/seldon/client.go b/executor/api/grpc/seldon/client.go new file mode 100644 index 0000000000..6b7afa8cd5 --- /dev/null +++ b/executor/api/grpc/seldon/client.go @@ -0,0 +1,185 @@ +package seldon + +import ( + "context" + "fmt" + "github.com/go-logr/logr" + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/seldonio/seldon-core/executor/api/client" + grpc2 "github.com/seldonio/seldon-core/executor/api/grpc" + "github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto" + "github.com/seldonio/seldon-core/executor/api/metric" + "github.com/seldonio/seldon-core/executor/api/payload" + "github.com/seldonio/seldon-core/executor/api/util" + v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "google.golang.org/grpc" + "io" + "math" + "net/http" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" +) + +type 
SeldonMessageGrpcClient struct { + Log logr.Logger + callOptions []grpc.CallOption + conns map[string]*grpc.ClientConn + Predictor *v1.PredictorSpec + DeploymentName string +} + +func NewSeldonGrpcClient(spec *v1.PredictorSpec, deploymentName string) client.SeldonApiClient { + opts := []grpc.CallOption{ + grpc.MaxCallSendMsgSize(math.MaxInt32), + grpc.MaxCallRecvMsgSize(math.MaxInt32), + } + smgc := SeldonMessageGrpcClient{ + Log: logf.Log.WithName("SeldonGrpcClient"), + callOptions: opts, + conns: make(map[string]*grpc.ClientConn), + Predictor: spec, + DeploymentName: deploymentName, + } + return &smgc +} + +func (s *SeldonMessageGrpcClient) getConnection(host string, port int32, modelName string) (*grpc.ClientConn, error) { + k := fmt.Sprintf("%s:%d", host, port) + if conn, ok := s.conns[k]; ok { + return conn, nil + } else { + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + if opentracing.IsGlobalTracerRegistered() { + opts = append(opts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(grpc_opentracing.UnaryClientInterceptor(), + metric.NewClientMetrics(s.Predictor, s.DeploymentName, modelName).UnaryClientInterceptor()))) + } else { + opts = append(opts, grpc.WithUnaryInterceptor(metric.NewClientMetrics(s.Predictor, s.DeploymentName, modelName).UnaryClientInterceptor())) + } + conn, err := grpc.Dial(fmt.Sprintf("%s:%d", host, port), opts...) 
+ if err != nil { + return nil, err + } + s.conns[k] = conn + return conn, nil + } +} + +func (s *SeldonMessageGrpcClient) Chain(ctx context.Context, modelName string, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + return msg, nil +} + +func (s *SeldonMessageGrpcClient) Predict(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + conn, err := s.getConnection(host, port, modelName) + if err != nil { + return s.CreateErrorPayload(err), err + } + grpcClient := proto.NewModelClient(conn) + resp, err := grpcClient.Predict(grpc2.AddMetadataToOutgoingGrpcContext(ctx, meta), msg.GetPayload().(*proto.SeldonMessage), s.callOptions...) + if err != nil { + return s.CreateErrorPayload(err), err + } + resPayload := payload.ProtoPayload{Msg: resp} + return &resPayload, nil +} + +func (s *SeldonMessageGrpcClient) TransformInput(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + conn, err := s.getConnection(host, port, modelName) + if err != nil { + return s.CreateErrorPayload(err), err + } + grpcClient := proto.NewTransformerClient(conn) + resp, err := grpcClient.TransformInput(grpc2.AddMetadataToOutgoingGrpcContext(ctx, meta), msg.GetPayload().(*proto.SeldonMessage), s.callOptions...) + if err != nil { + return s.CreateErrorPayload(err), err + } + resPayload := payload.ProtoPayload{Msg: resp} + return &resPayload, nil +} + +func (s *SeldonMessageGrpcClient) Route(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (int, error) { + conn, err := s.getConnection(host, port, modelName) + if err != nil { + return 0, err + } + grpcClient := proto.NewRouterClient(conn) + resp, err := grpcClient.Route(grpc2.AddMetadataToOutgoingGrpcContext(ctx, meta), msg.GetPayload().(*proto.SeldonMessage), s.callOptions...) 
+ if err != nil { + return 0, err + } + routes := util.ExtractRouteFromSeldonMessage(resp) + //Only returning first route. API could be extended to allow multiple routes + return routes[0], nil +} + +func (s *SeldonMessageGrpcClient) Combine(ctx context.Context, modelName string, host string, port int32, msgs []payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + conn, err := s.getConnection(host, port, modelName) + if err != nil { + return s.CreateErrorPayload(err), err + } + sms := make([]*proto.SeldonMessage, len(msgs)) + for i, sm := range msgs { + sms[i] = sm.GetPayload().(*proto.SeldonMessage) + } + grpcClient := proto.NewCombinerClient(conn) + sml := proto.SeldonMessageList{SeldonMessages: sms} + resp, err := grpcClient.Aggregate(grpc2.AddMetadataToOutgoingGrpcContext(ctx, meta), &sml, s.callOptions...) + if err != nil { + return s.CreateErrorPayload(err), err + } + resPayload := payload.ProtoPayload{Msg: resp} + return &resPayload, nil +} + +func (s *SeldonMessageGrpcClient) TransformOutput(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + conn, err := s.getConnection(host, port, modelName) + if err != nil { + return s.CreateErrorPayload(err), err + } + grpcClient := proto.NewOutputTransformerClient(conn) + resp, err := grpcClient.TransformOutput(grpc2.AddMetadataToOutgoingGrpcContext(ctx, meta), msg.GetPayload().(*proto.SeldonMessage), s.callOptions...) 
+ if err != nil { + return s.CreateErrorPayload(err), err + } + resPayload := payload.ProtoPayload{Msg: resp} + return &resPayload, nil +} + +func (s *SeldonMessageGrpcClient) Feedback(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + conn, err := s.getConnection(host, port, modelName) + if err != nil { + return s.CreateErrorPayload(err), err + } + grpcClient := proto.NewModelClient(conn) + resp, err := grpcClient.SendFeedback(grpc2.AddMetadataToOutgoingGrpcContext(ctx, meta), msg.GetPayload().(*proto.Feedback), s.callOptions...) + if err != nil { + return s.CreateErrorPayload(err), err + } + resPayload := payload.ProtoPayload{Msg: resp} + return &resPayload, nil +} + +func (s *SeldonMessageGrpcClient) Unmarshall(msg []byte) (payload.SeldonPayload, error) { + panic("Not implemented") +} + +func (s *SeldonMessageGrpcClient) Marshall(out io.Writer, msg payload.SeldonPayload) error { + panic("Not implemented") +} + +func (s *SeldonMessageGrpcClient) CreateErrorPayload(err error) payload.SeldonPayload { + respFailed := proto.SeldonMessage{Status: &proto.Status{Code: http.StatusInternalServerError, Info: err.Error()}} + res := payload.ProtoPayload{Msg: &respFailed} + return &res +} + +func (s *SeldonMessageGrpcClient) Status(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + return nil, errors.Errorf("Not implemented") +} + +func (s *SeldonMessageGrpcClient) Metadata(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + return nil, errors.Errorf("Not implemented") +} diff --git a/executor/api/grpc/seldon/proto/prediction.pb.go b/executor/api/grpc/seldon/proto/prediction.pb.go new file mode 100644 index 0000000000..91beababe2 --- /dev/null +++ 
b/executor/api/grpc/seldon/proto/prediction.pb.go @@ -0,0 +1,1556 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: prediction.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + _struct "github.com/golang/protobuf/ptypes/struct" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Metric_MetricType int32 + +const ( + Metric_COUNTER Metric_MetricType = 0 + Metric_GAUGE Metric_MetricType = 1 + Metric_TIMER Metric_MetricType = 2 +) + +var Metric_MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "TIMER", +} + +var Metric_MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "TIMER": 2, +} + +func (x Metric_MetricType) String() string { + return proto.EnumName(Metric_MetricType_name, int32(x)) +} + +func (Metric_MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{4, 0} +} + +type Status_StatusFlag int32 + +const ( + Status_SUCCESS Status_StatusFlag = 0 + Status_FAILURE Status_StatusFlag = 1 +) + +var Status_StatusFlag_name = map[int32]string{ + 0: "SUCCESS", + 1: "FAILURE", +} + +var Status_StatusFlag_value = map[string]int32{ + "SUCCESS": 0, + "FAILURE": 1, +} + +func (x Status_StatusFlag) String() string { + return proto.EnumName(Status_StatusFlag_name, int32(x)) +} + +func (Status_StatusFlag) 
EnumDescriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{6, 0} +} + +type SeldonMessage struct { + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Meta *Meta `protobuf:"bytes,2,opt,name=meta,proto3" json:"meta,omitempty"` + // Types that are valid to be assigned to DataOneof: + // *SeldonMessage_Data + // *SeldonMessage_BinData + // *SeldonMessage_StrData + // *SeldonMessage_JsonData + DataOneof isSeldonMessage_DataOneof `protobuf_oneof:"data_oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SeldonMessage) Reset() { *m = SeldonMessage{} } +func (m *SeldonMessage) String() string { return proto.CompactTextString(m) } +func (*SeldonMessage) ProtoMessage() {} +func (*SeldonMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{0} +} + +func (m *SeldonMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SeldonMessage.Unmarshal(m, b) +} +func (m *SeldonMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SeldonMessage.Marshal(b, m, deterministic) +} +func (m *SeldonMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeldonMessage.Merge(m, src) +} +func (m *SeldonMessage) XXX_Size() int { + return xxx_messageInfo_SeldonMessage.Size(m) +} +func (m *SeldonMessage) XXX_DiscardUnknown() { + xxx_messageInfo_SeldonMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_SeldonMessage proto.InternalMessageInfo + +func (m *SeldonMessage) GetStatus() *Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *SeldonMessage) GetMeta() *Meta { + if m != nil { + return m.Meta + } + return nil +} + +type isSeldonMessage_DataOneof interface { + isSeldonMessage_DataOneof() +} + +type SeldonMessage_Data struct { + Data *DefaultData `protobuf:"bytes,3,opt,name=data,proto3,oneof"` +} + +type SeldonMessage_BinData struct { + BinData []byte 
`protobuf:"bytes,4,opt,name=binData,proto3,oneof"` +} + +type SeldonMessage_StrData struct { + StrData string `protobuf:"bytes,5,opt,name=strData,proto3,oneof"` +} + +type SeldonMessage_JsonData struct { + JsonData *_struct.Value `protobuf:"bytes,6,opt,name=jsonData,proto3,oneof"` +} + +func (*SeldonMessage_Data) isSeldonMessage_DataOneof() {} + +func (*SeldonMessage_BinData) isSeldonMessage_DataOneof() {} + +func (*SeldonMessage_StrData) isSeldonMessage_DataOneof() {} + +func (*SeldonMessage_JsonData) isSeldonMessage_DataOneof() {} + +func (m *SeldonMessage) GetDataOneof() isSeldonMessage_DataOneof { + if m != nil { + return m.DataOneof + } + return nil +} + +func (m *SeldonMessage) GetData() *DefaultData { + if x, ok := m.GetDataOneof().(*SeldonMessage_Data); ok { + return x.Data + } + return nil +} + +func (m *SeldonMessage) GetBinData() []byte { + if x, ok := m.GetDataOneof().(*SeldonMessage_BinData); ok { + return x.BinData + } + return nil +} + +func (m *SeldonMessage) GetStrData() string { + if x, ok := m.GetDataOneof().(*SeldonMessage_StrData); ok { + return x.StrData + } + return "" +} + +func (m *SeldonMessage) GetJsonData() *_struct.Value { + if x, ok := m.GetDataOneof().(*SeldonMessage_JsonData); ok { + return x.JsonData + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*SeldonMessage) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*SeldonMessage_Data)(nil), + (*SeldonMessage_BinData)(nil), + (*SeldonMessage_StrData)(nil), + (*SeldonMessage_JsonData)(nil), + } +} + +type DefaultData struct { + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + // Types that are valid to be assigned to DataOneof: + // *DefaultData_Tensor + // *DefaultData_Ndarray + // *DefaultData_Tftensor + DataOneof isDefaultData_DataOneof `protobuf_oneof:"data_oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DefaultData) Reset() { *m = DefaultData{} } +func (m *DefaultData) String() string { return proto.CompactTextString(m) } +func (*DefaultData) ProtoMessage() {} +func (*DefaultData) Descriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{1} +} + +func (m *DefaultData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DefaultData.Unmarshal(m, b) +} +func (m *DefaultData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DefaultData.Marshal(b, m, deterministic) +} +func (m *DefaultData) XXX_Merge(src proto.Message) { + xxx_messageInfo_DefaultData.Merge(m, src) +} +func (m *DefaultData) XXX_Size() int { + return xxx_messageInfo_DefaultData.Size(m) +} +func (m *DefaultData) XXX_DiscardUnknown() { + xxx_messageInfo_DefaultData.DiscardUnknown(m) +} + +var xxx_messageInfo_DefaultData proto.InternalMessageInfo + +func (m *DefaultData) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + +type isDefaultData_DataOneof interface { + isDefaultData_DataOneof() +} + +type DefaultData_Tensor struct { + Tensor *Tensor `protobuf:"bytes,2,opt,name=tensor,proto3,oneof"` +} + +type DefaultData_Ndarray struct { + Ndarray *_struct.ListValue `protobuf:"bytes,3,opt,name=ndarray,proto3,oneof"` +} + +type DefaultData_Tftensor struct { + Tftensor 
*framework.TensorProto `protobuf:"bytes,4,opt,name=tftensor,proto3,oneof"` +} + +func (*DefaultData_Tensor) isDefaultData_DataOneof() {} + +func (*DefaultData_Ndarray) isDefaultData_DataOneof() {} + +func (*DefaultData_Tftensor) isDefaultData_DataOneof() {} + +func (m *DefaultData) GetDataOneof() isDefaultData_DataOneof { + if m != nil { + return m.DataOneof + } + return nil +} + +func (m *DefaultData) GetTensor() *Tensor { + if x, ok := m.GetDataOneof().(*DefaultData_Tensor); ok { + return x.Tensor + } + return nil +} + +func (m *DefaultData) GetNdarray() *_struct.ListValue { + if x, ok := m.GetDataOneof().(*DefaultData_Ndarray); ok { + return x.Ndarray + } + return nil +} + +func (m *DefaultData) GetTftensor() *framework.TensorProto { + if x, ok := m.GetDataOneof().(*DefaultData_Tftensor); ok { + return x.Tftensor + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*DefaultData) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*DefaultData_Tensor)(nil), + (*DefaultData_Ndarray)(nil), + (*DefaultData_Tftensor)(nil), + } +} + +type Tensor struct { + Shape []int32 `protobuf:"varint,1,rep,packed,name=shape,proto3" json:"shape,omitempty"` + Values []float64 `protobuf:"fixed64,2,rep,packed,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Tensor) Reset() { *m = Tensor{} } +func (m *Tensor) String() string { return proto.CompactTextString(m) } +func (*Tensor) ProtoMessage() {} +func (*Tensor) Descriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{2} +} + +func (m *Tensor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Tensor.Unmarshal(m, b) +} +func (m *Tensor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Tensor.Marshal(b, m, deterministic) +} +func (m *Tensor) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Tensor.Merge(m, src) +} +func (m *Tensor) XXX_Size() int { + return xxx_messageInfo_Tensor.Size(m) +} +func (m *Tensor) XXX_DiscardUnknown() { + xxx_messageInfo_Tensor.DiscardUnknown(m) +} + +var xxx_messageInfo_Tensor proto.InternalMessageInfo + +func (m *Tensor) GetShape() []int32 { + if m != nil { + return m.Shape + } + return nil +} + +func (m *Tensor) GetValues() []float64 { + if m != nil { + return m.Values + } + return nil +} + +type Meta struct { + Puid string `protobuf:"bytes,1,opt,name=puid,proto3" json:"puid,omitempty"` + Tags map[string]*_struct.Value `protobuf:"bytes,2,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Routing map[string]int32 `protobuf:"bytes,3,rep,name=routing,proto3" json:"routing,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + RequestPath map[string]string `protobuf:"bytes,4,rep,name=requestPath,proto3" json:"requestPath,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Metrics []*Metric `protobuf:"bytes,5,rep,name=metrics,proto3" json:"metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Meta) Reset() { *m = Meta{} } +func (m *Meta) String() string { return proto.CompactTextString(m) } +func (*Meta) ProtoMessage() {} +func (*Meta) Descriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{3} +} + +func (m *Meta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Meta.Unmarshal(m, b) +} +func (m *Meta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Meta.Marshal(b, m, deterministic) +} +func (m *Meta) XXX_Merge(src proto.Message) { + xxx_messageInfo_Meta.Merge(m, src) +} +func (m *Meta) XXX_Size() int { + return xxx_messageInfo_Meta.Size(m) +} +func (m *Meta) 
XXX_DiscardUnknown() { + xxx_messageInfo_Meta.DiscardUnknown(m) +} + +var xxx_messageInfo_Meta proto.InternalMessageInfo + +func (m *Meta) GetPuid() string { + if m != nil { + return m.Puid + } + return "" +} + +func (m *Meta) GetTags() map[string]*_struct.Value { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Meta) GetRouting() map[string]int32 { + if m != nil { + return m.Routing + } + return nil +} + +func (m *Meta) GetRequestPath() map[string]string { + if m != nil { + return m.RequestPath + } + return nil +} + +func (m *Meta) GetMetrics() []*Metric { + if m != nil { + return m.Metrics + } + return nil +} + +type Metric struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Type Metric_MetricType `protobuf:"varint,2,opt,name=type,proto3,enum=seldon.protos.Metric_MetricType" json:"type,omitempty"` + Value float32 `protobuf:"fixed32,3,opt,name=value,proto3" json:"value,omitempty"` + Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{4} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Metric) GetType() Metric_MetricType { + if m != nil { + return m.Type + } + return Metric_COUNTER +} + +func (m *Metric) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Metric) GetTags() map[string]string { + if m != nil { + return m.Tags + } + return nil +} + +type SeldonMessageList struct { + SeldonMessages []*SeldonMessage `protobuf:"bytes,1,rep,name=seldonMessages,proto3" json:"seldonMessages,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SeldonMessageList) Reset() { *m = SeldonMessageList{} } +func (m *SeldonMessageList) String() string { return proto.CompactTextString(m) } +func (*SeldonMessageList) ProtoMessage() {} +func (*SeldonMessageList) Descriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{5} +} + +func (m *SeldonMessageList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SeldonMessageList.Unmarshal(m, b) +} +func (m *SeldonMessageList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SeldonMessageList.Marshal(b, m, deterministic) +} +func (m *SeldonMessageList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeldonMessageList.Merge(m, src) +} +func (m *SeldonMessageList) XXX_Size() int { + return xxx_messageInfo_SeldonMessageList.Size(m) +} +func (m *SeldonMessageList) XXX_DiscardUnknown() { + xxx_messageInfo_SeldonMessageList.DiscardUnknown(m) +} + +var xxx_messageInfo_SeldonMessageList proto.InternalMessageInfo + +func (m *SeldonMessageList) GetSeldonMessages() []*SeldonMessage { + if m != nil { + return m.SeldonMessages + } + return nil +} + +type Status struct { + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Info string `protobuf:"bytes,2,opt,name=info,proto3" 
json:"info,omitempty"` + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + Status Status_StatusFlag `protobuf:"varint,4,opt,name=status,proto3,enum=seldon.protos.Status_StatusFlag" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{6} +} + +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *Status) GetReason() string { + if m != nil { + return m.Reason + } + return "" +} + +func (m *Status) GetStatus() Status_StatusFlag { + if m != nil { + return m.Status + } + return Status_SUCCESS +} + +type Feedback struct { + Request *SeldonMessage `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + Response *SeldonMessage `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` + Reward float32 `protobuf:"fixed32,3,opt,name=reward,proto3" json:"reward,omitempty"` + Truth *SeldonMessage `protobuf:"bytes,4,opt,name=truth,proto3" json:"truth,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Feedback) Reset() { *m = Feedback{} } +func (m *Feedback) String() string { return proto.CompactTextString(m) } +func (*Feedback) ProtoMessage() {} +func (*Feedback) Descriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{7} +} + +func (m *Feedback) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Feedback.Unmarshal(m, b) +} +func (m *Feedback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Feedback.Marshal(b, m, deterministic) +} +func (m *Feedback) XXX_Merge(src proto.Message) { + xxx_messageInfo_Feedback.Merge(m, src) +} +func (m *Feedback) XXX_Size() int { + return xxx_messageInfo_Feedback.Size(m) +} +func (m *Feedback) XXX_DiscardUnknown() { + xxx_messageInfo_Feedback.DiscardUnknown(m) +} + +var xxx_messageInfo_Feedback proto.InternalMessageInfo + +func (m *Feedback) GetRequest() *SeldonMessage { + if m != nil { + return m.Request + } + return nil +} + +func (m *Feedback) GetResponse() *SeldonMessage { + if m != nil { + return m.Response + } + return nil +} + +func (m *Feedback) GetReward() float32 { + if m != nil { + return m.Reward + } + return 0 +} + +func (m *Feedback) GetTruth() *SeldonMessage { + if m != nil { + return m.Truth + } + return nil +} + +type RequestResponse struct { + Request *SeldonMessage `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + Response *SeldonMessage `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestResponse) Reset() { *m = RequestResponse{} } +func (m *RequestResponse) String() string { return proto.CompactTextString(m) } +func (*RequestResponse) ProtoMessage() {} +func (*RequestResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_430b55197713f541, []int{8} +} + +func (m *RequestResponse) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RequestResponse.Unmarshal(m, b) +} +func (m *RequestResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RequestResponse.Marshal(b, m, deterministic) +} +func (m *RequestResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestResponse.Merge(m, src) +} +func (m *RequestResponse) XXX_Size() int { + return xxx_messageInfo_RequestResponse.Size(m) +} +func (m *RequestResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RequestResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestResponse proto.InternalMessageInfo + +func (m *RequestResponse) GetRequest() *SeldonMessage { + if m != nil { + return m.Request + } + return nil +} + +func (m *RequestResponse) GetResponse() *SeldonMessage { + if m != nil { + return m.Response + } + return nil +} + +func init() { + proto.RegisterEnum("seldon.protos.Metric_MetricType", Metric_MetricType_name, Metric_MetricType_value) + proto.RegisterEnum("seldon.protos.Status_StatusFlag", Status_StatusFlag_name, Status_StatusFlag_value) + proto.RegisterType((*SeldonMessage)(nil), "seldon.protos.SeldonMessage") + proto.RegisterType((*DefaultData)(nil), "seldon.protos.DefaultData") + proto.RegisterType((*Tensor)(nil), "seldon.protos.Tensor") + proto.RegisterType((*Meta)(nil), "seldon.protos.Meta") + proto.RegisterMapType((map[string]string)(nil), "seldon.protos.Meta.RequestPathEntry") + proto.RegisterMapType((map[string]int32)(nil), "seldon.protos.Meta.RoutingEntry") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "seldon.protos.Meta.TagsEntry") + proto.RegisterType((*Metric)(nil), "seldon.protos.Metric") + proto.RegisterMapType((map[string]string)(nil), "seldon.protos.Metric.TagsEntry") + proto.RegisterType((*SeldonMessageList)(nil), "seldon.protos.SeldonMessageList") + proto.RegisterType((*Status)(nil), "seldon.protos.Status") + proto.RegisterType((*Feedback)(nil), "seldon.protos.Feedback") + 
proto.RegisterType((*RequestResponse)(nil), "seldon.protos.RequestResponse") +} + +func init() { proto.RegisterFile("prediction.proto", fileDescriptor_430b55197713f541) } + +var fileDescriptor_430b55197713f541 = []byte{ + // 1046 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xdb, 0x6e, 0x23, 0x45, + 0x13, 0xf6, 0xd8, 0x1e, 0x3b, 0x2e, 0x67, 0xb3, 0xb3, 0xfd, 0xef, 0xc1, 0xb2, 0xf6, 0x17, 0x96, + 0x85, 0x96, 0x5c, 0xb0, 0x1e, 0xf0, 0x2e, 0x10, 0x45, 0x68, 0xa5, 0x1c, 0x9c, 0x83, 0xb4, 0xd9, + 0x44, 0x6d, 0x07, 0x69, 0x91, 0x10, 0x6a, 0x7b, 0xda, 0x93, 0x21, 0xf6, 0xf4, 0xd0, 0xdd, 0x43, + 0xc8, 0x35, 0xe2, 0x82, 0x07, 0x40, 0x02, 0x1e, 0x81, 0x27, 0xe1, 0x0e, 0xf1, 0x46, 0xa8, 0x0f, + 0x63, 0x3b, 0xc6, 0xeb, 0x5c, 0xc4, 0x42, 0x5c, 0xb9, 0xbb, 0xea, 0xfb, 0xbe, 0xea, 0xaa, 0xae, + 0x6a, 0xdb, 0xe0, 0x25, 0x9c, 0x06, 0xd1, 0x40, 0x46, 0x2c, 0x6e, 0x25, 0x9c, 0x49, 0x86, 0xee, + 0x09, 0x3a, 0x0a, 0xb2, 0x9d, 0xa8, 0x3f, 0x0d, 0x19, 0x0b, 0x47, 0xd4, 0xd7, 0xdb, 0x7e, 0x3a, + 0xf4, 0x85, 0xe4, 0xe9, 0x40, 0x1a, 0x77, 0xfd, 0x99, 0xa4, 0xb1, 0x60, 0x7c, 0x38, 0x62, 0x57, + 0xfe, 0x80, 0x71, 0xea, 0x0f, 0x39, 0x19, 0xd3, 0x2b, 0xc6, 0x2f, 0x7d, 0xe3, 0x31, 0xb8, 0xe6, + 0xcf, 0x79, 0xb8, 0xd7, 0xd5, 0xba, 0x27, 0x54, 0x08, 0x12, 0x52, 0xf4, 0x1c, 0x4a, 0x42, 0x12, + 0x99, 0x8a, 0x9a, 0xd3, 0x70, 0x36, 0xab, 0xed, 0x47, 0xad, 0x1b, 0x71, 0x5b, 0x5d, 0xed, 0xc4, + 0x16, 0x84, 0x3e, 0x80, 0xe2, 0x98, 0x4a, 0x52, 0xcb, 0x6b, 0xf0, 0xff, 0xe6, 0xc0, 0x27, 0x54, + 0x12, 0xac, 0x01, 0xe8, 0x23, 0x28, 0x06, 0x44, 0x92, 0x5a, 0x41, 0x03, 0xeb, 0x73, 0xc0, 0x7d, + 0x3a, 0x24, 0xe9, 0x48, 0xee, 0x13, 0x49, 0x8e, 0x72, 0x58, 0x23, 0x51, 0x1d, 0xca, 0xfd, 0x28, + 0x56, 0xa6, 0x5a, 0xb1, 0xe1, 0x6c, 0xae, 0x1f, 0xe5, 0x70, 0x66, 0x50, 0x3e, 0x21, 0xb9, 0xf6, + 0xb9, 0x0d, 0x67, 0xb3, 0xa2, 0x7c, 0xd6, 0x80, 0x5e, 0xc2, 0xda, 0x37, 0x82, 0x19, 0x62, 0x49, + 0x47, 0x7b, 0xdc, 0x32, 0xc5, 0x6a, 0x65, 0xc5, 0x6a, 0x7d, 
0x41, 0x46, 0x29, 0x3d, 0xca, 0xe1, + 0x09, 0x72, 0x77, 0x1d, 0x40, 0x45, 0xfd, 0x9a, 0xc5, 0x94, 0x0d, 0x9b, 0x7f, 0x39, 0x50, 0x9d, + 0x39, 0x13, 0x7a, 0x08, 0x6e, 0x4c, 0xc6, 0x54, 0x15, 0xa5, 0xb0, 0x59, 0xc1, 0x66, 0x83, 0x7c, + 0x28, 0x99, 0x6a, 0xda, 0xf4, 0xe7, 0x6b, 0xd5, 0xd3, 0xce, 0xa3, 0x1c, 0xb6, 0x30, 0xf4, 0x29, + 0x94, 0xe3, 0x80, 0x70, 0x4e, 0xae, 0x27, 0x75, 0x98, 0x3f, 0xd9, 0xeb, 0x48, 0xc8, 0xec, 0x74, + 0x19, 0x18, 0x7d, 0x02, 0x6b, 0x72, 0x68, 0x43, 0x15, 0x35, 0xf1, 0x49, 0x6b, 0x7a, 0xc3, 0x36, + 0xce, 0x99, 0x92, 0x50, 0x39, 0x65, 0xd0, 0xb9, 0x9c, 0x5e, 0x41, 0xc9, 0x00, 0x51, 0x0d, 0x5c, + 0x71, 0x41, 0x12, 0xaa, 0xb3, 0x71, 0x77, 0xf3, 0x9e, 0x83, 0x8d, 0x01, 0xd5, 0xa1, 0xf4, 0x9d, + 0x0a, 0x2e, 0x6a, 0xf9, 0x46, 0x61, 0xd3, 0xd1, 0x2e, 0x6b, 0x69, 0xfe, 0x59, 0x80, 0xa2, 0xba, + 0x50, 0x84, 0xa0, 0x98, 0xa4, 0x51, 0xa0, 0x1b, 0xa4, 0x82, 0xf5, 0x1a, 0x7d, 0x0c, 0x45, 0x49, + 0x42, 0x43, 0xab, 0xb6, 0xff, 0xbf, 0xa0, 0x0f, 0x5a, 0x3d, 0x12, 0x8a, 0x4e, 0x2c, 0xf9, 0x35, + 0xd6, 0x50, 0xb4, 0x0d, 0x65, 0xce, 0x52, 0x19, 0xc5, 0x61, 0xad, 0xa0, 0x59, 0x8d, 0x45, 0x2c, + 0x6c, 0x20, 0x86, 0x98, 0x11, 0xd0, 0x01, 0x54, 0x39, 0xfd, 0x36, 0xa5, 0x42, 0x9e, 0x11, 0x79, + 0x51, 0x2b, 0x6a, 0xfe, 0xfb, 0x0b, 0xf9, 0x53, 0x98, 0xd1, 0x98, 0x25, 0x22, 0x1f, 0xca, 0x63, + 0x2a, 0x79, 0x34, 0x10, 0x35, 0x57, 0x6b, 0x3c, 0xfa, 0xa7, 0x06, 0x8f, 0x06, 0x38, 0x43, 0xd5, + 0x4f, 0xa1, 0x32, 0xc9, 0x03, 0x79, 0x50, 0xb8, 0xa4, 0xd7, 0xb6, 0x0e, 0x6a, 0x89, 0x3e, 0x04, + 0x57, 0x57, 0xcb, 0x36, 0xc4, 0x3b, 0x1a, 0x0f, 0x1b, 0xd0, 0x76, 0x7e, 0xcb, 0xa9, 0x6f, 0xc3, + 0xfa, 0x6c, 0x8a, 0x0b, 0x34, 0x1f, 0xce, 0x6a, 0xba, 0xb3, 0xdc, 0x57, 0xe0, 0xcd, 0xa7, 0x77, + 0x1b, 0xbf, 0x32, 0xc3, 0x6f, 0xfe, 0x98, 0x87, 0x92, 0x49, 0x70, 0x01, 0xed, 0x25, 0x14, 0xe5, + 0x75, 0x62, 0x58, 0x1b, 0x8b, 0xee, 0x86, 0x47, 0x03, 0xfb, 0xd1, 0xbb, 0x4e, 0x28, 0xd6, 0xe8, + 0x69, 0x30, 0xd5, 0xdf, 0x79, 0x1b, 0x0c, 0xbd, 0xb0, 0xdd, 0x61, 0xee, 0xe9, 0xbd, 0xc5, 0x5a, + 
0x73, 0xfd, 0x51, 0xff, 0x6c, 0x79, 0xa9, 0xdf, 0x9d, 0x96, 0x0f, 0x30, 0x3d, 0x17, 0xaa, 0x42, + 0x79, 0xef, 0xf4, 0xfc, 0x4d, 0xaf, 0x83, 0xbd, 0x1c, 0xaa, 0x80, 0x7b, 0xb8, 0x73, 0x7e, 0xd8, + 0xf1, 0x1c, 0xb5, 0xec, 0x1d, 0x9f, 0x74, 0xb0, 0x97, 0x6f, 0xbe, 0x85, 0x07, 0x37, 0x1e, 0x41, + 0x35, 0x83, 0x68, 0x1f, 0x36, 0xc4, 0xac, 0xd1, 0xcc, 0x7e, 0xb5, 0xfd, 0x74, 0xfe, 0x41, 0x9c, + 0x05, 0xe1, 0x39, 0x4e, 0xf3, 0x77, 0x07, 0x4a, 0xe6, 0xc9, 0x54, 0x63, 0x33, 0x60, 0x01, 0xd5, + 0x39, 0xb8, 0x58, 0xaf, 0x95, 0x2d, 0x8a, 0x87, 0xcc, 0xe6, 0xa0, 0xd7, 0xe8, 0x31, 0x94, 0x38, + 0x25, 0x82, 0xc5, 0xba, 0x86, 0x15, 0x6c, 0x77, 0x68, 0x6b, 0xf2, 0x32, 0x17, 0x17, 0x5e, 0x89, + 0x09, 0x63, 0x3f, 0x0e, 0x46, 0x24, 0xcc, 0x1e, 0xe9, 0xe6, 0x33, 0x80, 0xa9, 0x55, 0x15, 0xa4, + 0x7b, 0xbe, 0xb7, 0xd7, 0xe9, 0x76, 0xbd, 0x9c, 0xda, 0x1c, 0xec, 0x1c, 0xbf, 0x3e, 0xc7, 0x1d, + 0xcf, 0x69, 0xfe, 0xe1, 0xc0, 0xda, 0x01, 0xa5, 0x41, 0x9f, 0x0c, 0x2e, 0xd5, 0x5b, 0x65, 0x27, + 0xc5, 0x7e, 0x13, 0x2c, 0x4f, 0x3c, 0x03, 0xa3, 0x2d, 0x58, 0xe3, 0x54, 0x24, 0x2c, 0x16, 0xd9, + 0x14, 0x2c, 0x27, 0x4e, 0xd0, 0x26, 0xf1, 0x2b, 0xc2, 0x03, 0xdb, 0x3c, 0x76, 0x87, 0xda, 0xe0, + 0x4a, 0x9e, 0xea, 0x31, 0xbf, 0x5d, 0xce, 0x40, 0x9b, 0x3f, 0x38, 0x70, 0xdf, 0xce, 0x06, 0xce, + 0xf4, 0xff, 0xf5, 0x8c, 0xda, 0x3f, 0x15, 0xa0, 0x7c, 0x48, 0x63, 0xaa, 0x26, 0xec, 0x0d, 0x6c, + 0xf4, 0x38, 0x89, 0xc5, 0x90, 0xf1, 0xf1, 0x71, 0x9c, 0xa4, 0x12, 0x2d, 0x55, 0xa9, 0x2f, 0xf5, + 0x36, 0x73, 0xe8, 0x14, 0xee, 0x4f, 0xf4, 0x4e, 0x53, 0x79, 0x77, 0xc1, 0x0e, 0xb8, 0xea, 0x25, + 0xa2, 0x77, 0x94, 0x39, 0x81, 0xca, 0x4e, 0x18, 0x72, 0x1a, 0x12, 0x49, 0x51, 0x63, 0x19, 0x58, + 0x8d, 0xd9, 0xad, 0x72, 0x87, 0xb0, 0xde, 0xa5, 0x71, 0x30, 0x69, 0xcb, 0x27, 0x73, 0xf8, 0xcc, + 0x71, 0x9b, 0x50, 0xfb, 0x57, 0x07, 0xdc, 0x13, 0x16, 0xd0, 0x11, 0x3a, 0x84, 0xf2, 0x99, 0xf9, + 0x75, 0x75, 0xc7, 0x54, 0x57, 0x76, 0xb6, 0x5f, 0x1c, 0x28, 0xe9, 0xda, 0xf3, 0x55, 0xdd, 0xc2, + 0xca, 0x8e, 0xf6, 0x15, 0x54, 0x27, 
0x6d, 0x46, 0xf9, 0xaa, 0xbb, 0xb8, 0x1d, 0xc0, 0x03, 0xd3, + 0xbc, 0xb3, 0x41, 0x56, 0xdd, 0xda, 0xed, 0xb7, 0xb0, 0xb6, 0xc7, 0xc6, 0xfd, 0x28, 0xa6, 0x7c, + 0xc5, 0xfd, 0xd9, 0xfe, 0x4d, 0x3d, 0xf0, 0xda, 0xf6, 0xdf, 0xeb, 0xab, 0xdd, 0x3e, 0x78, 0x11, + 0xbb, 0x89, 0xd9, 0xf5, 0xce, 0x26, 0xff, 0x2c, 0xf4, 0x2f, 0x46, 0xf1, 0xe5, 0xe7, 0x61, 0x24, + 0x2f, 0xd2, 0x7e, 0x6b, 0xc0, 0xc6, 0xbe, 0xc1, 0x46, 0xcc, 0x2e, 0x9e, 0xeb, 0x3f, 0x0f, 0xf4, + 0x7b, 0x32, 0x4e, 0x46, 0x54, 0xf8, 0x57, 0x9c, 0x24, 0x09, 0xe5, 0xc2, 0x0f, 0x99, 0x9f, 0x5c, + 0x86, 0x3e, 0x49, 0xa2, 0x7e, 0x49, 0xeb, 0xbe, 0xf8, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x47, 0x57, + 0x0b, 0x26, 0xb2, 0x0c, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GenericClient is the client API for Generic service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type GenericClient interface { + TransformInput(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) + TransformOutput(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) + Route(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) + Aggregate(ctx context.Context, in *SeldonMessageList, opts ...grpc.CallOption) (*SeldonMessage, error) + SendFeedback(ctx context.Context, in *Feedback, opts ...grpc.CallOption) (*SeldonMessage, error) +} + +type genericClient struct { + cc *grpc.ClientConn +} + +func NewGenericClient(cc *grpc.ClientConn) GenericClient { + return &genericClient{cc} +} + +func (c *genericClient) TransformInput(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Generic/TransformInput", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *genericClient) TransformOutput(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Generic/TransformOutput", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *genericClient) Route(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Generic/Route", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *genericClient) Aggregate(ctx context.Context, in *SeldonMessageList, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Generic/Aggregate", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *genericClient) SendFeedback(ctx context.Context, in *Feedback, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Generic/SendFeedback", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GenericServer is the server API for Generic service. +type GenericServer interface { + TransformInput(context.Context, *SeldonMessage) (*SeldonMessage, error) + TransformOutput(context.Context, *SeldonMessage) (*SeldonMessage, error) + Route(context.Context, *SeldonMessage) (*SeldonMessage, error) + Aggregate(context.Context, *SeldonMessageList) (*SeldonMessage, error) + SendFeedback(context.Context, *Feedback) (*SeldonMessage, error) +} + +// UnimplementedGenericServer can be embedded to have forward compatible implementations. +type UnimplementedGenericServer struct { +} + +func (*UnimplementedGenericServer) TransformInput(ctx context.Context, req *SeldonMessage) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method TransformInput not implemented") +} +func (*UnimplementedGenericServer) TransformOutput(ctx context.Context, req *SeldonMessage) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method TransformOutput not implemented") +} +func (*UnimplementedGenericServer) Route(ctx context.Context, req *SeldonMessage) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method Route not implemented") +} +func (*UnimplementedGenericServer) Aggregate(ctx context.Context, req *SeldonMessageList) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method Aggregate not implemented") +} +func (*UnimplementedGenericServer) SendFeedback(ctx context.Context, req *Feedback) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendFeedback not implemented") +} + +func 
RegisterGenericServer(s *grpc.Server, srv GenericServer) { + s.RegisterService(&_Generic_serviceDesc, srv) +} + +func _Generic_TransformInput_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeldonMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GenericServer).TransformInput(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Generic/TransformInput", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GenericServer).TransformInput(ctx, req.(*SeldonMessage)) + } + return interceptor(ctx, in, info, handler) +} + +func _Generic_TransformOutput_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeldonMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GenericServer).TransformOutput(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Generic/TransformOutput", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GenericServer).TransformOutput(ctx, req.(*SeldonMessage)) + } + return interceptor(ctx, in, info, handler) +} + +func _Generic_Route_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeldonMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GenericServer).Route(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Generic/Route", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GenericServer).Route(ctx, req.(*SeldonMessage)) + } + return interceptor(ctx, in, 
info, handler) +} + +func _Generic_Aggregate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeldonMessageList) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GenericServer).Aggregate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Generic/Aggregate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GenericServer).Aggregate(ctx, req.(*SeldonMessageList)) + } + return interceptor(ctx, in, info, handler) +} + +func _Generic_SendFeedback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Feedback) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GenericServer).SendFeedback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Generic/SendFeedback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GenericServer).SendFeedback(ctx, req.(*Feedback)) + } + return interceptor(ctx, in, info, handler) +} + +var _Generic_serviceDesc = grpc.ServiceDesc{ + ServiceName: "seldon.protos.Generic", + HandlerType: (*GenericServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "TransformInput", + Handler: _Generic_TransformInput_Handler, + }, + { + MethodName: "TransformOutput", + Handler: _Generic_TransformOutput_Handler, + }, + { + MethodName: "Route", + Handler: _Generic_Route_Handler, + }, + { + MethodName: "Aggregate", + Handler: _Generic_Aggregate_Handler, + }, + { + MethodName: "SendFeedback", + Handler: _Generic_SendFeedback_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "prediction.proto", +} + +// ModelClient is the client API for Model service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ModelClient interface { + Predict(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) + SendFeedback(ctx context.Context, in *Feedback, opts ...grpc.CallOption) (*SeldonMessage, error) +} + +type modelClient struct { + cc *grpc.ClientConn +} + +func NewModelClient(cc *grpc.ClientConn) ModelClient { + return &modelClient{cc} +} + +func (c *modelClient) Predict(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Model/Predict", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelClient) SendFeedback(ctx context.Context, in *Feedback, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Model/SendFeedback", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ModelServer is the server API for Model service. +type ModelServer interface { + Predict(context.Context, *SeldonMessage) (*SeldonMessage, error) + SendFeedback(context.Context, *Feedback) (*SeldonMessage, error) +} + +// UnimplementedModelServer can be embedded to have forward compatible implementations. 
+type UnimplementedModelServer struct { +} + +func (*UnimplementedModelServer) Predict(ctx context.Context, req *SeldonMessage) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method Predict not implemented") +} +func (*UnimplementedModelServer) SendFeedback(ctx context.Context, req *Feedback) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendFeedback not implemented") +} + +func RegisterModelServer(s *grpc.Server, srv ModelServer) { + s.RegisterService(&_Model_serviceDesc, srv) +} + +func _Model_Predict_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeldonMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServer).Predict(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Model/Predict", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServer).Predict(ctx, req.(*SeldonMessage)) + } + return interceptor(ctx, in, info, handler) +} + +func _Model_SendFeedback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Feedback) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServer).SendFeedback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Model/SendFeedback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServer).SendFeedback(ctx, req.(*Feedback)) + } + return interceptor(ctx, in, info, handler) +} + +var _Model_serviceDesc = grpc.ServiceDesc{ + ServiceName: "seldon.protos.Model", + HandlerType: (*ModelServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Predict", + Handler: _Model_Predict_Handler, + 
}, + { + MethodName: "SendFeedback", + Handler: _Model_SendFeedback_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "prediction.proto", +} + +// RouterClient is the client API for Router service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RouterClient interface { + Route(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) + SendFeedback(ctx context.Context, in *Feedback, opts ...grpc.CallOption) (*SeldonMessage, error) +} + +type routerClient struct { + cc *grpc.ClientConn +} + +func NewRouterClient(cc *grpc.ClientConn) RouterClient { + return &routerClient{cc} +} + +func (c *routerClient) Route(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Router/Route", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routerClient) SendFeedback(ctx context.Context, in *Feedback, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Router/SendFeedback", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RouterServer is the server API for Router service. +type RouterServer interface { + Route(context.Context, *SeldonMessage) (*SeldonMessage, error) + SendFeedback(context.Context, *Feedback) (*SeldonMessage, error) +} + +// UnimplementedRouterServer can be embedded to have forward compatible implementations. 
+type UnimplementedRouterServer struct { +} + +func (*UnimplementedRouterServer) Route(ctx context.Context, req *SeldonMessage) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method Route not implemented") +} +func (*UnimplementedRouterServer) SendFeedback(ctx context.Context, req *Feedback) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendFeedback not implemented") +} + +func RegisterRouterServer(s *grpc.Server, srv RouterServer) { + s.RegisterService(&_Router_serviceDesc, srv) +} + +func _Router_Route_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeldonMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouterServer).Route(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Router/Route", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouterServer).Route(ctx, req.(*SeldonMessage)) + } + return interceptor(ctx, in, info, handler) +} + +func _Router_SendFeedback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Feedback) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouterServer).SendFeedback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Router/SendFeedback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouterServer).SendFeedback(ctx, req.(*Feedback)) + } + return interceptor(ctx, in, info, handler) +} + +var _Router_serviceDesc = grpc.ServiceDesc{ + ServiceName: "seldon.protos.Router", + HandlerType: (*RouterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Route", + Handler: _Router_Route_Handler, + 
}, + { + MethodName: "SendFeedback", + Handler: _Router_SendFeedback_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "prediction.proto", +} + +// TransformerClient is the client API for Transformer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TransformerClient interface { + TransformInput(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) +} + +type transformerClient struct { + cc *grpc.ClientConn +} + +func NewTransformerClient(cc *grpc.ClientConn) TransformerClient { + return &transformerClient{cc} +} + +func (c *transformerClient) TransformInput(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Transformer/TransformInput", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TransformerServer is the server API for Transformer service. +type TransformerServer interface { + TransformInput(context.Context, *SeldonMessage) (*SeldonMessage, error) +} + +// UnimplementedTransformerServer can be embedded to have forward compatible implementations. 
+type UnimplementedTransformerServer struct { +} + +func (*UnimplementedTransformerServer) TransformInput(ctx context.Context, req *SeldonMessage) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method TransformInput not implemented") +} + +func RegisterTransformerServer(s *grpc.Server, srv TransformerServer) { + s.RegisterService(&_Transformer_serviceDesc, srv) +} + +func _Transformer_TransformInput_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeldonMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TransformerServer).TransformInput(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Transformer/TransformInput", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TransformerServer).TransformInput(ctx, req.(*SeldonMessage)) + } + return interceptor(ctx, in, info, handler) +} + +var _Transformer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "seldon.protos.Transformer", + HandlerType: (*TransformerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "TransformInput", + Handler: _Transformer_TransformInput_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "prediction.proto", +} + +// OutputTransformerClient is the client API for OutputTransformer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type OutputTransformerClient interface { + TransformOutput(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) +} + +type outputTransformerClient struct { + cc *grpc.ClientConn +} + +func NewOutputTransformerClient(cc *grpc.ClientConn) OutputTransformerClient { + return &outputTransformerClient{cc} +} + +func (c *outputTransformerClient) TransformOutput(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.OutputTransformer/TransformOutput", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// OutputTransformerServer is the server API for OutputTransformer service. +type OutputTransformerServer interface { + TransformOutput(context.Context, *SeldonMessage) (*SeldonMessage, error) +} + +// UnimplementedOutputTransformerServer can be embedded to have forward compatible implementations. +type UnimplementedOutputTransformerServer struct { +} + +func (*UnimplementedOutputTransformerServer) TransformOutput(ctx context.Context, req *SeldonMessage) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method TransformOutput not implemented") +} + +func RegisterOutputTransformerServer(s *grpc.Server, srv OutputTransformerServer) { + s.RegisterService(&_OutputTransformer_serviceDesc, srv) +} + +func _OutputTransformer_TransformOutput_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeldonMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OutputTransformerServer).TransformOutput(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.OutputTransformer/TransformOutput", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(OutputTransformerServer).TransformOutput(ctx, req.(*SeldonMessage)) + } + return interceptor(ctx, in, info, handler) +} + +var _OutputTransformer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "seldon.protos.OutputTransformer", + HandlerType: (*OutputTransformerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "TransformOutput", + Handler: _OutputTransformer_TransformOutput_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "prediction.proto", +} + +// CombinerClient is the client API for Combiner service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type CombinerClient interface { + Aggregate(ctx context.Context, in *SeldonMessageList, opts ...grpc.CallOption) (*SeldonMessage, error) +} + +type combinerClient struct { + cc *grpc.ClientConn +} + +func NewCombinerClient(cc *grpc.ClientConn) CombinerClient { + return &combinerClient{cc} +} + +func (c *combinerClient) Aggregate(ctx context.Context, in *SeldonMessageList, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Combiner/Aggregate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CombinerServer is the server API for Combiner service. +type CombinerServer interface { + Aggregate(context.Context, *SeldonMessageList) (*SeldonMessage, error) +} + +// UnimplementedCombinerServer can be embedded to have forward compatible implementations. 
+type UnimplementedCombinerServer struct { +} + +func (*UnimplementedCombinerServer) Aggregate(ctx context.Context, req *SeldonMessageList) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method Aggregate not implemented") +} + +func RegisterCombinerServer(s *grpc.Server, srv CombinerServer) { + s.RegisterService(&_Combiner_serviceDesc, srv) +} + +func _Combiner_Aggregate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeldonMessageList) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CombinerServer).Aggregate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Combiner/Aggregate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CombinerServer).Aggregate(ctx, req.(*SeldonMessageList)) + } + return interceptor(ctx, in, info, handler) +} + +var _Combiner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "seldon.protos.Combiner", + HandlerType: (*CombinerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Aggregate", + Handler: _Combiner_Aggregate_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "prediction.proto", +} + +// SeldonClient is the client API for Seldon service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type SeldonClient interface { + Predict(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) + SendFeedback(ctx context.Context, in *Feedback, opts ...grpc.CallOption) (*SeldonMessage, error) +} + +type seldonClient struct { + cc *grpc.ClientConn +} + +func NewSeldonClient(cc *grpc.ClientConn) SeldonClient { + return &seldonClient{cc} +} + +func (c *seldonClient) Predict(ctx context.Context, in *SeldonMessage, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Seldon/Predict", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seldonClient) SendFeedback(ctx context.Context, in *Feedback, opts ...grpc.CallOption) (*SeldonMessage, error) { + out := new(SeldonMessage) + err := c.cc.Invoke(ctx, "/seldon.protos.Seldon/SendFeedback", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SeldonServer is the server API for Seldon service. +type SeldonServer interface { + Predict(context.Context, *SeldonMessage) (*SeldonMessage, error) + SendFeedback(context.Context, *Feedback) (*SeldonMessage, error) +} + +// UnimplementedSeldonServer can be embedded to have forward compatible implementations. 
+type UnimplementedSeldonServer struct { +} + +func (*UnimplementedSeldonServer) Predict(ctx context.Context, req *SeldonMessage) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method Predict not implemented") +} +func (*UnimplementedSeldonServer) SendFeedback(ctx context.Context, req *Feedback) (*SeldonMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendFeedback not implemented") +} + +func RegisterSeldonServer(s *grpc.Server, srv SeldonServer) { + s.RegisterService(&_Seldon_serviceDesc, srv) +} + +func _Seldon_Predict_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeldonMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeldonServer).Predict(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Seldon/Predict", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeldonServer).Predict(ctx, req.(*SeldonMessage)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seldon_SendFeedback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Feedback) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeldonServer).SendFeedback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/seldon.protos.Seldon/SendFeedback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeldonServer).SendFeedback(ctx, req.(*Feedback)) + } + return interceptor(ctx, in, info, handler) +} + +var _Seldon_serviceDesc = grpc.ServiceDesc{ + ServiceName: "seldon.protos.Seldon", + HandlerType: (*SeldonServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Predict", + Handler: 
_Seldon_Predict_Handler,
+		},
+		{
+			MethodName: "SendFeedback",
+			Handler:    _Seldon_SendFeedback_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "prediction.proto",
+}
diff --git a/executor/api/grpc/seldon/server.go b/executor/api/grpc/seldon/server.go
new file mode 100644
index 0000000000..6de40bd366
--- /dev/null
+++ b/executor/api/grpc/seldon/server.go
@@ -0,0 +1,52 @@
+package seldon
+
+import (
+	"context"
+	"github.com/go-logr/logr"
+	"github.com/seldonio/seldon-core/executor/api/client"
+	"github.com/seldonio/seldon-core/executor/api/grpc"
+	"github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto"
+	"github.com/seldonio/seldon-core/executor/api/payload"
+	"github.com/seldonio/seldon-core/executor/predictor"
+	"github.com/seldonio/seldon-core/operator/apis/machinelearning/v1"
+	"net/url"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+)
+
+// GrpcSeldonServer serves the Seldon gRPC protocol by pushing each request
+// through the predictor graph.
+type GrpcSeldonServer struct {
+	Client    client.SeldonApiClient
+	predictor *v1.PredictorSpec
+	Log       logr.Logger
+	ServerUrl *url.URL
+	Namespace string
+}
+
+// NewGrpcSeldonServer builds a GrpcSeldonServer for the given predictor
+// graph; client is used to call the individual graph nodes.
+func NewGrpcSeldonServer(predictor *v1.PredictorSpec, client client.SeldonApiClient, serverUrl *url.URL, namespace string) *GrpcSeldonServer {
+	return &GrpcSeldonServer{
+		Client:    client,
+		predictor: predictor,
+		Log:       logf.Log.WithName("SeldonGrpcApi"),
+		ServerUrl: serverUrl,
+		Namespace: namespace,
+	}
+}
+
+// Predict runs req through the predictor graph and returns its response.
+func (g GrpcSeldonServer) Predict(ctx context.Context, req *proto.SeldonMessage) (*proto.SeldonMessage, error) {
+	seldonPredictorProcess := predictor.NewPredictorProcess(ctx, g.Client, logf.Log.WithName("SeldonMessageRestClient"), g.ServerUrl, g.Namespace, grpc.CollectMetadata(ctx))
+	reqPayload := payload.ProtoPayload{Msg: req}
+	resPayload, err := seldonPredictorProcess.Predict(g.predictor.Graph, &reqPayload)
+	if err != nil {
+		// resPayload can be nil when the graph call fails; returning here
+		// avoids a nil dereference in the type assertion below.
+		g.Log.Error(err, "Failed to call predict")
+		return nil, err
+	}
+	return resPayload.GetPayload().(*proto.SeldonMessage), nil
+}
+
+// SendFeedback runs a Feedback message through the predictor graph.
+func (g GrpcSeldonServer) SendFeedback(ctx context.Context, req *proto.Feedback) (*proto.SeldonMessage, error) {
+	seldonPredictorProcess := predictor.NewPredictorProcess(ctx, g.Client, logf.Log.WithName("SeldonMessageRestClient"), g.ServerUrl, g.Namespace, grpc.CollectMetadata(ctx))
+	reqPayload := payload.ProtoPayload{Msg: req}
+	resPayload, err := seldonPredictorProcess.Feedback(g.predictor.Graph, &reqPayload)
+	if err != nil {
+		g.Log.Error(err, "Failed to call feedback")
+		return nil, err
+	}
+	return resPayload.GetPayload().(*proto.SeldonMessage), nil
+}
diff --git a/executor/api/grpc/seldon/server_test.go b/executor/api/grpc/seldon/server_test.go
new file mode 100644
index 0000000000..4db4de8433
--- /dev/null
+++ b/executor/api/grpc/seldon/server_test.go
@@ -0,0 +1,72 @@
+package seldon
+
+import (
+	"context"
+	"github.com/golang/protobuf/jsonpb"
+	. "github.com/onsi/gomega"
+	"github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto"
+	"github.com/seldonio/seldon-core/executor/api/test"
+	"github.com/seldonio/seldon-core/operator/apis/machinelearning/v1"
+	"net/url"
+	"testing"
+)
+
+func TestPredict(t *testing.T) {
+	t.Logf("Started")
+	g := NewGomegaWithT(t)
+
+	model := v1.MODEL
+	p := v1.PredictorSpec{
+		Name: "p",
+		Graph: &v1.PredictiveUnit{
+			Type: &model,
+			Endpoint: &v1.Endpoint{
+				ServiceHost: "foo",
+				ServicePort: 9000,
+				Type:        v1.REST,
+			},
+		},
+	}
+	url, _ := url.Parse("http://localhost")
+	server := NewGrpcSeldonServer(&p, test.NewSeldonMessageTestClient(t, 0, nil, nil), url, "default")
+
+	var sm proto.SeldonMessage
+	var data = ` {"data":{"ndarray":[[1.1,2.0]]}}`
+	err := jsonpb.UnmarshalString(data, &sm)
+	g.Expect(err).Should(BeNil())
+
+	res, err := server.Predict(context.TODO(), &sm)
+	g.Expect(err).To(BeNil())
+	g.Expect(res.GetData().GetNdarray().Values[0].GetListValue().Values[0].GetNumberValue()).Should(Equal(1.1))
+	g.Expect(res.GetData().GetNdarray().Values[0].GetListValue().Values[1].GetNumberValue()).Should(Equal(2.0))
+}
+
+func TestFeedback(t *testing.T) {
+	t.Logf("Started")
+	g := NewGomegaWithT(t)
+
+	model :=
v1.MODEL
+	p := v1.PredictorSpec{
+		Name: "p",
+		Graph: &v1.PredictiveUnit{
+			Type: &model,
+			Endpoint: &v1.Endpoint{
+				ServiceHost: "foo",
+				ServicePort: 9000,
+				Type:        v1.REST,
+			},
+		},
+	}
+	url, _ := url.Parse("http://localhost")
+	server := NewGrpcSeldonServer(&p, test.NewSeldonMessageTestClient(t, 0, nil, nil), url, "default")
+
+	var sm proto.Feedback
+	var data = ` {"request":{"data":{"ndarray":[[1.1,2.0]]}}}`
+	err := jsonpb.UnmarshalString(data, &sm)
+	g.Expect(err).Should(BeNil())
+
+	res, err := server.SendFeedback(context.TODO(), &sm)
+	g.Expect(err).To(BeNil())
+	g.Expect(res.GetData().GetNdarray().Values[0].GetListValue().Values[0].GetNumberValue()).Should(Equal(1.1))
+	g.Expect(res.GetData().GetNdarray().Values[0].GetListValue().Values[1].GetNumberValue()).Should(Equal(2.0))
+}
diff --git a/executor/api/grpc/server.go b/executor/api/grpc/server.go
new file mode 100644
index 0000000000..9b0e865515
--- /dev/null
+++ b/executor/api/grpc/server.go
@@ -0,0 +1,47 @@
+package grpc
+
+import (
+	"context"
+	guuid "github.com/google/uuid"
+	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
+	"github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+	"github.com/opentracing/opentracing-go"
+	"github.com/seldonio/seldon-core/executor/api/metric"
+	"github.com/seldonio/seldon-core/executor/api/payload"
+	v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+	"math"
+)
+
+const (
+	ProtobufContentType = "application/protobuf"
+)
+
+// CreateGrpcServer builds the executor's gRPC server with unlimited message
+// sizes, a metrics interceptor and, when a global tracer is registered, an
+// opentracing interceptor chained in front of it.
+func CreateGrpcServer(spec *v1.PredictorSpec, deploymentName string) *grpc.Server {
+	opts := []grpc.ServerOption{
+		grpc.MaxRecvMsgSize(math.MaxInt32),
+		grpc.MaxSendMsgSize(math.MaxInt32),
+	}
+	if opentracing.IsGlobalTracerRegistered() {
+		opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(grpc_opentracing.UnaryServerInterceptor(), metric.NewServerMetrics(spec, deploymentName).UnaryServerInterceptor())))
+	} else {
+		opts = append(opts, grpc.UnaryInterceptor(metric.NewServerMetrics(spec, deploymentName).UnaryServerInterceptor()))
+	}
+
+	grpcServer := grpc.NewServer(opts...)
+	return grpcServer
+}
+
+// CollectMetadata returns the incoming gRPC metadata, guaranteeing it
+// carries a Seldon PUID entry (generating a fresh UUID when absent).
+func CollectMetadata(ctx context.Context) map[string][]string {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return map[string][]string{payload.SeldonPUIDHeader: []string{guuid.New().String()}}
+	}
+	// FromIncomingContext documents that the returned MD must not be
+	// modified, so take a copy before injecting the PUID.
+	md = md.Copy()
+	if len(md.Get(payload.SeldonPUIDHeader)) == 0 {
+		md.Set(payload.SeldonPUIDHeader, guuid.New().String())
+	}
+	return md
+}
diff --git a/executor/api/grpc/server_test.go b/executor/api/grpc/server_test.go
new file mode 100644
index 0000000000..e4b27b6baf
--- /dev/null
+++ b/executor/api/grpc/server_test.go
@@ -0,0 +1,32 @@
+package grpc
+
+import (
+	"context"
+	. "github.com/onsi/gomega"
+	"github.com/seldonio/seldon-core/executor/api/payload"
+	"google.golang.org/grpc/metadata"
+	"strings"
+	"testing"
+)
+
+func TestAddPuid(t *testing.T) {
+	t.Logf("Started")
+	g := NewGomegaWithT(t)
+
+	ctx := context.Background()
+	meta := CollectMetadata(ctx)
+
+	g.Expect(meta[payload.SeldonPUIDHeader]).NotTo(BeNil())
+}
+
+func TestExistingPuid(t *testing.T) {
+	t.Logf("Started")
+	g := NewGomegaWithT(t)
+	guid := "1"
+
+	ctx := metadata.NewIncomingContext(context.TODO(), metadata.New(map[string]string{payload.SeldonPUIDHeader: guid}))
+	meta := CollectMetadata(ctx)
+
+	g.Expect(meta[strings.ToLower(payload.SeldonPUIDHeader)]).NotTo(BeNil())
+	g.Expect(meta[strings.ToLower(payload.SeldonPUIDHeader)][0]).To(Equal(guid))
+}
diff --git a/executor/api/grpc/tensorflow/client.go b/executor/api/grpc/tensorflow/client.go
new file mode 100644
index 0000000000..a9caa9d307
--- /dev/null
+++ b/executor/api/grpc/tensorflow/client.go
@@ -0,0 +1,176 @@
+package tensorflow
+
+import (
+	"context"
+	"fmt"
+	"github.com/go-logr/logr"
+	"github.com/golang/protobuf/proto"
+	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
+	grpc_opentracing
"github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+	"github.com/opentracing/opentracing-go"
+	"github.com/pkg/errors"
+	"github.com/seldonio/seldon-core/executor/api/client"
+	grpc2 "github.com/seldonio/seldon-core/executor/api/grpc"
+	"github.com/seldonio/seldon-core/executor/api/metric"
+	"github.com/seldonio/seldon-core/executor/api/payload"
+	"github.com/seldonio/seldon-core/executor/proto/tensorflow/serving"
+	v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1"
+	"google.golang.org/grpc"
+	"io"
+	"math"
+	"sync"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+)
+
+// TensorflowGrpcClient calls TensorFlow Serving over gRPC, caching one
+// client connection per host:port pair.
+type TensorflowGrpcClient struct {
+	Log         logr.Logger
+	callOptions []grpc.CallOption
+	// connsMu guards conns: getConnection is reached from concurrent RPCs
+	// and plain Go maps are not safe for concurrent use.
+	connsMu        sync.Mutex
+	conns          map[string]*grpc.ClientConn
+	Predictor      *v1.PredictorSpec
+	DeploymentName string
+}
+
+// NewTensorflowGrpcClient builds a client for the given predictor with
+// unlimited gRPC message sizes.
+func NewTensorflowGrpcClient(predictor *v1.PredictorSpec, deploymentName string) client.SeldonApiClient {
+	opts := []grpc.CallOption{
+		grpc.MaxCallSendMsgSize(math.MaxInt32),
+		grpc.MaxCallRecvMsgSize(math.MaxInt32),
+	}
+	smgc := TensorflowGrpcClient{
+		Log:            logf.Log.WithName("SeldonGrpcClient"),
+		callOptions:    opts,
+		conns:          make(map[string]*grpc.ClientConn),
+		Predictor:      predictor,
+		DeploymentName: deploymentName,
+	}
+	return &smgc
+}
+
+// getConnection returns a cached connection for host:port, dialing with the
+// metrics (and, when registered, tracing) interceptors on first use.
+func (s *TensorflowGrpcClient) getConnection(host string, port int32, modelName string) (*grpc.ClientConn, error) {
+	k := fmt.Sprintf("%s:%d", host, port)
+	s.connsMu.Lock()
+	defer s.connsMu.Unlock()
+	if conn, ok := s.conns[k]; ok {
+		return conn, nil
+	}
+	opts := []grpc.DialOption{
+		grpc.WithInsecure(),
+	}
+	if opentracing.IsGlobalTracerRegistered() {
+		opts = append(opts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(grpc_opentracing.UnaryClientInterceptor(),
+			metric.NewClientMetrics(s.Predictor, s.DeploymentName, modelName).UnaryClientInterceptor())))
+	} else {
+		opts = append(opts, grpc.WithUnaryInterceptor(metric.NewClientMetrics(s.Predictor, s.DeploymentName, modelName).UnaryClientInterceptor()))
+	}
+	// Reuse the already-formatted host:port key as the dial target.
+	conn, err := grpc.Dial(k, opts...)
+	if err != nil {
+		return nil, err
+	}
+	s.conns[k] = conn
+	return conn, nil
+}
+
+// Chain allows PredictResponses to be turned into PredictRequests so graph
+// nodes can be composed; request types pass through unchanged.
+func (s *TensorflowGrpcClient) Chain(ctx context.Context, modelName string, msg payload.SeldonPayload) (payload.SeldonPayload, error) {
+	switch v := msg.GetPayload().(type) {
+	case *serving.PredictRequest, *serving.ClassificationRequest, *serving.MultiInferenceRequest:
+		s.Log.Info("Identity chain")
+		return msg, nil
+	case *serving.PredictResponse:
+		s.Log.Info("Chain!")
+		pr := serving.PredictRequest{
+			ModelSpec: &serving.ModelSpec{
+				Name: modelName,
+			},
+			Inputs: v.Outputs,
+		}
+		msg2 := payload.ProtoPayload{Msg: &pr}
+		return &msg2, nil
+	default:
+		return nil, errors.Errorf("Invalid type %v", v)
+	}
+}
+
+// Predict dispatches msg to the matching TensorFlow Serving RPC
+// (Predict, Classify or MultiInference) based on the payload type.
+func (s *TensorflowGrpcClient) Predict(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) {
+	conn, err := s.getConnection(host, port, modelName)
+	if err != nil {
+		return s.CreateErrorPayload(err), err
+	}
+	grpcClient := serving.NewPredictionServiceClient(conn)
+	ctx = grpc2.AddMetadataToOutgoingGrpcContext(ctx, meta)
+	var resp proto.Message
+	switch v := msg.GetPayload().(type) {
+	case *serving.PredictRequest:
+		resp, err = grpcClient.Predict(ctx, v, s.callOptions...)
+	case *serving.ClassificationRequest:
+		resp, err = grpcClient.Classify(ctx, v, s.callOptions...)
+	case *serving.MultiInferenceRequest:
+		resp, err = grpcClient.MultiInference(ctx, v, s.callOptions...)
+	default:
+		return nil, errors.Errorf("Invalid type %v", v)
+	}
+	if err != nil {
+		return nil, err
+	}
+	resPayload := payload.ProtoPayload{Msg: resp}
+	return &resPayload, nil
+}
+
+// TransformInput is served by the same TensorFlow RPCs as Predict.
+func (s *TensorflowGrpcClient) TransformInput(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) {
+	return s.Predict(ctx, modelName, host, port, msg, meta)
+}
+
+// Route is not part of the TensorFlow Serving protocol.
+func (s *TensorflowGrpcClient) Route(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (int, error) {
+	panic("Not implemented")
+}
+
+// Combine is not part of the TensorFlow Serving protocol.
+func (s *TensorflowGrpcClient) Combine(ctx context.Context, modelName string, host string, port int32, msgs []payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) {
+	panic("Not implemented")
+}
+
+// TransformOutput is served by the same TensorFlow RPCs as Predict.
+func (s *TensorflowGrpcClient) TransformOutput(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) {
+	return s.Predict(ctx, modelName, host, port, msg, meta)
+}
+
+func (s *TensorflowGrpcClient) Status(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) {
+	conn, err := s.getConnection(host, port, modelName)
+	if err != nil {
+		return s.CreateErrorPayload(err), err
+	}
+	grpcClient := serving.NewModelServiceClient(conn)
+	var resp proto.Message
+	resp, err = grpcClient.GetModelStatus(grpc2.AddMetadataToOutgoingGrpcContext(ctx, meta), msg.GetPayload().(*serving.GetModelStatusRequest), s.callOptions...)
+ if err != nil { + return nil, err + } + resPayload := payload.ProtoPayload{Msg: resp} + return &resPayload, nil +} + +func (s *TensorflowGrpcClient) Metadata(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + conn, err := s.getConnection(host, port, modelName) + if err != nil { + return s.CreateErrorPayload(err), err + } + grpcClient := serving.NewPredictionServiceClient(conn) + var resp proto.Message + resp, err = grpcClient.GetModelMetadata(grpc2.AddMetadataToOutgoingGrpcContext(ctx, meta), msg.GetPayload().(*serving.GetModelMetadataRequest), s.callOptions...) + if err != nil { + return nil, err + } + resPayload := payload.ProtoPayload{Msg: resp} + return &resPayload, nil +} + +func (s *TensorflowGrpcClient) Feedback(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + panic("implement me") +} + +func (s *TensorflowGrpcClient) Unmarshall(msg []byte) (payload.SeldonPayload, error) { + panic("Not implemented") +} + +func (s *TensorflowGrpcClient) Marshall(out io.Writer, msg payload.SeldonPayload) error { + panic("Not implemented") +} + +func (s *TensorflowGrpcClient) CreateErrorPayload(err error) payload.SeldonPayload { + panic("Not implemented") +} diff --git a/executor/api/grpc/tensorflow/server.go b/executor/api/grpc/tensorflow/server.go new file mode 100644 index 0000000000..7e1bb0e871 --- /dev/null +++ b/executor/api/grpc/tensorflow/server.go @@ -0,0 +1,98 @@ +package tensorflow + +import ( + "context" + "github.com/go-logr/logr" + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + "github.com/seldonio/seldon-core/executor/api/client" + "github.com/seldonio/seldon-core/executor/api/grpc" + "github.com/seldonio/seldon-core/executor/api/payload" + "github.com/seldonio/seldon-core/executor/predictor" + 
"github.com/seldonio/seldon-core/executor/proto/tensorflow/serving" + "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "net/url" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" +) + +type GrpcTensorflowServer struct { + Client client.SeldonApiClient + predictor *v1.PredictorSpec + Log logr.Logger + ServerUrl *url.URL + Namespace string +} + +func NewGrpcTensorflowServer(predictor *v1.PredictorSpec, client client.SeldonApiClient, serverUrl *url.URL, namespace string) *GrpcTensorflowServer { + return &GrpcTensorflowServer{ + Client: client, + predictor: predictor, + Log: logf.Log.WithName("SeldonGrpcApi"), + ServerUrl: serverUrl, + Namespace: namespace, + } +} + +func (g *GrpcTensorflowServer) execute(ctx context.Context, req proto.Message, method string) (payload.SeldonPayload, error) { + seldonPredictorProcess := predictor.NewPredictorProcess(ctx, g.Client, logf.Log.WithName(method), g.ServerUrl, g.Namespace, grpc.CollectMetadata(ctx)) + reqPayload := payload.ProtoPayload{Msg: req} + return seldonPredictorProcess.Predict(g.predictor.Graph, &reqPayload) +} + +func (g *GrpcTensorflowServer) Classify(ctx context.Context, req *serving.ClassificationRequest) (*serving.ClassificationResponse, error) { + resPayload, err := g.execute(ctx, req, "GrpcClassify") + if err != nil { + return nil, err + } + return resPayload.GetPayload().(*serving.ClassificationResponse), nil +} + +func (g *GrpcTensorflowServer) Regress(ctx context.Context, req *serving.RegressionRequest) (*serving.RegressionResponse, error) { + resPayload, err := g.execute(ctx, req, "GrpcRegress") + if err != nil { + return nil, err + } + return resPayload.GetPayload().(*serving.RegressionResponse), nil +} + +func (g *GrpcTensorflowServer) Predict(ctx context.Context, req *serving.PredictRequest) (*serving.PredictResponse, error) { + resPayload, err := g.execute(ctx, req, "GrpcPredict") + if err != nil { + return nil, err + } + return 
resPayload.GetPayload().(*serving.PredictResponse), nil +} + +// MultiInference API for multi-headed models. +func (g *GrpcTensorflowServer) MultiInference(ctx context.Context, req *serving.MultiInferenceRequest) (*serving.MultiInferenceResponse, error) { + resPayload, err := g.execute(ctx, req, "GrpcMultiInference") + if err != nil { + return nil, err + } + return resPayload.GetPayload().(*serving.MultiInferenceResponse), nil +} + +// GetModelMetadata - provides access to metadata for loaded models. +func (g *GrpcTensorflowServer) GetModelMetadata(ctx context.Context, req *serving.GetModelMetadataRequest) (*serving.GetModelMetadataResponse, error) { + seldonPredictorProcess := predictor.NewPredictorProcess(ctx, g.Client, logf.Log.WithName("GrpcGetModelMetadata"), g.ServerUrl, g.Namespace, grpc.CollectMetadata(ctx)) + reqPayload := payload.ProtoPayload{Msg: req} + resPayload, err := seldonPredictorProcess.Metadata(g.predictor.Graph, req.ModelSpec.Name, &reqPayload) + if err != nil { + return nil, err + } + return resPayload.GetPayload().(*serving.GetModelMetadataResponse), nil +} + +func (g *GrpcTensorflowServer) GetModelStatus(ctx context.Context, req *serving.GetModelStatusRequest) (*serving.GetModelStatusResponse, error) { + seldonPredictorProcess := predictor.NewPredictorProcess(ctx, g.Client, logf.Log.WithName("GrpcGetModelStatus"), g.ServerUrl, g.Namespace, grpc.CollectMetadata(ctx)) + reqPayload := payload.ProtoPayload{Msg: req} + resPayload, err := seldonPredictorProcess.Status(g.predictor.Graph, req.ModelSpec.Name, &reqPayload) + if err != nil { + return nil, err + } + return resPayload.GetPayload().(*serving.GetModelStatusResponse), nil +} + +func (g *GrpcTensorflowServer) HandleReloadConfigRequest(context.Context, *serving.ReloadConfigRequest) (*serving.ReloadConfigResponse, error) { + return nil, errors.Errorf("Not implemented") +} diff --git a/executor/api/grpc/tensorflow/server_test.go b/executor/api/grpc/tensorflow/server_test.go new file mode 100644 
index 0000000000..2ba0400fb1 --- /dev/null +++ b/executor/api/grpc/tensorflow/server_test.go @@ -0,0 +1,209 @@ +package tensorflow + +import ( + "context" + "github.com/golang/protobuf/jsonpb" + . "github.com/onsi/gomega" + "github.com/seldonio/seldon-core/executor/api/client" + "github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto" + "github.com/seldonio/seldon-core/executor/api/payload" + "github.com/seldonio/seldon-core/executor/proto/tensorflow/serving" + v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "google.golang.org/grpc/metadata" + "io" + "net/http" + "net/url" + "testing" +) + +const ( + TestMetaDataKey = "foo" + TestMetaDataVal = "bar" + TestModelVersion = int64(1) +) + +type TestTensorflowClient struct { + t *testing.T +} + +func NewTestTensorflowClient(t *testing.T) client.SeldonApiClient { + client := TestTensorflowClient{ + t: t, + } + return &client +} + +func (s TestTensorflowClient) Status(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + st := serving.GetModelStatusResponse{ + ModelVersionStatus: []*serving.ModelVersionStatus{ + &serving.ModelVersionStatus{ + Version: TestModelVersion, + }, + }, + } + sm := payload.ProtoPayload{Msg: &st} + return &sm, nil +} + +func (s TestTensorflowClient) Metadata(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + g := NewGomegaWithT(s.t) + md, ok := metadata.FromIncomingContext(ctx) + g.Expect(ok).NotTo(BeNil()) + g.Expect(md.Get(TestMetaDataKey)[0]).To(Equal(TestMetaDataVal)) + pm := msg.GetPayload().(*serving.GetModelMetadataRequest) + st := serving.GetModelMetadataResponse{ + ModelSpec: pm.ModelSpec, + } + sm := payload.ProtoPayload{Msg: &st} + return &sm, nil +} + +func (s TestTensorflowClient) Chain(ctx context.Context, modelName string, msg payload.SeldonPayload) 
(payload.SeldonPayload, error) { + return msg, nil +} + +func (s TestTensorflowClient) Unmarshall(msg []byte) (payload.SeldonPayload, error) { + reqPayload := payload.BytesPayload{Msg: msg, ContentType: "application/json"} + return &reqPayload, nil +} + +func (s TestTensorflowClient) Marshall(out io.Writer, msg payload.SeldonPayload) error { + panic("") +} + +func (s TestTensorflowClient) CreateErrorPayload(err error) payload.SeldonPayload { + respFailed := proto.SeldonMessage{Status: &proto.Status{Code: http.StatusInternalServerError, Info: err.Error()}} + res := payload.ProtoPayload{Msg: &respFailed} + return &res +} + +func (s TestTensorflowClient) Predict(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + g := NewGomegaWithT(s.t) + md, ok := metadata.FromIncomingContext(ctx) + g.Expect(ok).NotTo(BeNil()) + g.Expect(md.Get(TestMetaDataKey)[0]).To(Equal(TestMetaDataVal)) + pm := msg.GetPayload().(*serving.PredictRequest) + pr := serving.PredictResponse{ + Outputs: pm.Inputs, + } + sm := payload.ProtoPayload{Msg: &pr} + return &sm, nil +} + +func (s TestTensorflowClient) TransformInput(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + panic("") +} + +func (s TestTensorflowClient) Route(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (int, error) { + panic("") +} + +func (s TestTensorflowClient) Combine(ctx context.Context, modelName string, host string, port int32, msgs []payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + panic("") +} + +func (s TestTensorflowClient) TransformOutput(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + panic("") +} + +func (s 
TestTensorflowClient) Feedback(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + panic("not implemented") +} +func TestPredict(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + model := v1.MODEL + p := v1.PredictorSpec{ + Name: "p", + Graph: &v1.PredictiveUnit{ + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + }, + } + url, _ := url.Parse("http://localhost") + server := NewGrpcTensorflowServer(&p, NewTestTensorflowClient(t), url, "default") + + var sm serving.PredictRequest + var data = `{"model_spec":{"name":"half_plus_two"},"inputs":{"x":{"dtype": 1, "tensor_shape": {"dim":[{"size": 3}]}, "floatVal" : [1.0, 2.0, 3.0]}}}` + err := jsonpb.UnmarshalString(data, &sm) + g.Expect(err).Should(BeNil()) + + ctx := context.Background() + ctx = metadata.NewIncomingContext(ctx, metadata.New(map[string]string{TestMetaDataKey: TestMetaDataVal})) + res, err := server.Predict(ctx, &sm) + g.Expect(err).To(BeNil()) + g.Expect(res.Outputs["x"].FloatVal[0]).Should(Equal(float32(1.0))) + g.Expect(res.Outputs["x"].FloatVal[1]).Should(Equal(float32(2.0))) +} + +func TestGetModelStatus(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + model := v1.MODEL + p := v1.PredictorSpec{ + Name: "p", + Graph: &v1.PredictiveUnit{ + Name: "model", + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + }, + } + url, _ := url.Parse("http://localhost") + server := NewGrpcTensorflowServer(&p, NewTestTensorflowClient(t), url, "default") + + var sm serving.GetModelStatusRequest + var data = `{"model_spec":{"name":"model"}}` + err := jsonpb.UnmarshalString(data, &sm) + g.Expect(err).Should(BeNil()) + + ctx := context.Background() + ctx = metadata.NewIncomingContext(ctx, metadata.New(map[string]string{TestMetaDataKey: TestMetaDataVal})) + res, err := 
server.GetModelStatus(ctx, &sm) + g.Expect(err).To(BeNil()) + g.Expect(res.ModelVersionStatus[0].Version).To(Equal(TestModelVersion)) + +} + +func TestGetModelMetadata(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + const modelName = "model" + model := v1.MODEL + p := v1.PredictorSpec{ + Name: "p", + Graph: &v1.PredictiveUnit{ + Name: modelName, + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + }, + } + url, _ := url.Parse("http://localhost") + server := NewGrpcTensorflowServer(&p, NewTestTensorflowClient(t), url, "default") + + var sm serving.GetModelMetadataRequest + var data = `{"model_spec":{"name":"model"},"metadata_field":["signature_def"]}` + err := jsonpb.UnmarshalString(data, &sm) + g.Expect(err).Should(BeNil()) + + ctx := context.Background() + ctx = metadata.NewIncomingContext(ctx, metadata.New(map[string]string{TestMetaDataKey: TestMetaDataVal})) + res, err := server.GetModelMetadata(ctx, &sm) + g.Expect(err).To(BeNil()) + g.Expect(res.ModelSpec.Name).To(Equal(modelName)) + +} diff --git a/executor/api/metric/client.go b/executor/api/metric/client.go new file mode 100644 index 0000000000..cba8461c1b --- /dev/null +++ b/executor/api/metric/client.go @@ -0,0 +1,66 @@ +package metric + +import ( + "context" + "github.com/prometheus/client_golang/prometheus" + v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/status" + "strings" + "time" +) + +type ClientMetrics struct { + ClientHandledHistogram *prometheus.HistogramVec + Predictor *v1.PredictorSpec + DeploymentName string + ModelName string + ImageName string + ImageVersion string +} + +func NewClientMetrics(spec *v1.PredictorSpec, deploymentName string, modelName string) *ClientMetrics { + histogram := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: ClientRequestsMetricName, + Help: "A histogram of latencies for client calls from executor", + 
Buckets: prometheus.DefBuckets, + }, + []string{DeploymentNameMetric, PredictorNameMetric, PredictorVersionMetric, ServiceMetric, ModelNameMetric, ModelImageMetric, ModelVersionMetric, "method", "code"}, + ) + + err := prometheus.Register(histogram) + if err != nil { + prometheus.Unregister(histogram) + prometheus.Register(histogram) + } + container := v1.GetContainerForPredictiveUnit(spec, modelName) + imageName := "" + imageVersion := "" + if container != nil { + imageParts := strings.Split(container.Image, ":") + imageName = imageParts[0] + if len(imageParts) == 2 { + imageVersion = imageParts[1] + } + } + + return &ClientMetrics{ + ClientHandledHistogram: histogram, + Predictor: spec, + DeploymentName: deploymentName, + ModelName: modelName, + ImageName: imageName, + ImageVersion: imageVersion, + } +} + +func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + startTime := time.Now() + err := invoker(ctx, method, req, reply, cc, opts...) + st, _ := status.FromError(err) + m.ClientHandledHistogram.WithLabelValues(m.DeploymentName, m.Predictor.Name, m.Predictor.Annotations["version"], method, m.ModelName, m.ImageName, m.ImageVersion, "unary", st.Code().String()).Observe(time.Since(startTime).Seconds()) + return err + } +} diff --git a/executor/api/metric/client_test.go b/executor/api/metric/client_test.go new file mode 100644 index 0000000000..3b7dd4bb2d --- /dev/null +++ b/executor/api/metric/client_test.go @@ -0,0 +1,45 @@ +package metric + +import ( + . 
"github.com/onsi/gomega" + v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + v12 "k8s.io/api/core/v1" + v1meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "testing" +) + +func TestNewFromtMeta(t *testing.T) { + g := NewGomegaWithT(t) + + const imageName = "image" + const imageVersion = "1.2" + const modelName = "classifier" + const deploymentName = "dep" + predictor := v1.PredictorSpec{ + Name: "", + Graph: &v1.PredictiveUnit{ + Name: modelName, + }, + ComponentSpecs: []*v1.SeldonPodSpec{ + &v1.SeldonPodSpec{ + Metadata: v1meta.ObjectMeta{}, + Spec: v12.PodSpec{ + Containers: []v12.Container{ + v12.Container{ + Name: modelName, + Image: imageName + ":" + imageVersion, + }, + }, + }, + HpaSpec: nil, + }, + }, + } + + metrics := NewClientMetrics(&predictor, deploymentName, modelName) + + g.Expect(metrics.ImageName).To(Equal(imageName)) + g.Expect(metrics.ImageVersion).To(Equal(imageVersion)) + g.Expect(metrics.ModelName).To(Equal(modelName)) + g.Expect(metrics.DeploymentName).To(Equal(deploymentName)) +} diff --git a/executor/api/metric/constants.go b/executor/api/metric/constants.go new file mode 100644 index 0000000000..078c338a03 --- /dev/null +++ b/executor/api/metric/constants.go @@ -0,0 +1,23 @@ +package metric + +const ( + CodeMetric = "code" // 2xx, 5xx etc + HTTPMethodMetric = "method" // Http Method (Post, Get etc) + ServiceMetric = "service" // http or grpc service: prediction, feedback etc + DeploymentNameMetric = "deployment_name" + PredictorNameMetric = "predictor_name" + PredictorVersionMetric = "predictor_version" + ModelNameMetric = "model_name" + ModelImageMetric = "model_image" + ModelVersionMetric = "model_version" + + PredictionServiceMetricName = "predictions" + + ServerRequestsMetricName = "seldon_api_executor_server_requests_seconds" + ClientRequestsMetricName = "seldon_api_executor_client_requests_seconds" + + PredictionHttpServiceName = "predictions" + StatusHttpServiceName = "status" + MetadataHttpServiceName = "metadata" 
+ FeedbackHttpServiceName = "feedback" +) diff --git a/executor/api/metric/server.go b/executor/api/metric/server.go new file mode 100644 index 0000000000..92d60226ff --- /dev/null +++ b/executor/api/metric/server.go @@ -0,0 +1,48 @@ +package metric + +import ( + "context" + "github.com/prometheus/client_golang/prometheus" + v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/status" + "time" +) + +type ServerMetrics struct { + ServerHandledHistogram *prometheus.HistogramVec + Predictor *v1.PredictorSpec + DeploymentName string +} + +func NewServerMetrics(spec *v1.PredictorSpec, deploymentName string) *ServerMetrics { + histogram := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: ServerRequestsMetricName, + Help: "A histogram of latencies for executor server", + Buckets: prometheus.DefBuckets, + }, + []string{DeploymentNameMetric, PredictorNameMetric, PredictorVersionMetric, ServiceMetric, "method", "code"}, + ) + err := prometheus.Register(histogram) + if err != nil { + prometheus.Unregister(histogram) + prometheus.Register(histogram) + } + return &ServerMetrics{ + ServerHandledHistogram: histogram, + Predictor: spec, + DeploymentName: deploymentName, + } +} + +// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs. 
+func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + startTime := time.Now() + resp, err := handler(ctx, req) + st, _ := status.FromError(err) + m.ServerHandledHistogram.WithLabelValues(m.DeploymentName, m.Predictor.Name, m.Predictor.Annotations["version"], info.FullMethod, "unary", st.Code().String()).Observe(time.Since(startTime).Seconds()) + return resp, err + } +} diff --git a/executor/api/payload/bytes.go b/executor/api/payload/bytes.go new file mode 100644 index 0000000000..60de8f3887 --- /dev/null +++ b/executor/api/payload/bytes.go @@ -0,0 +1,18 @@ +package payload + +type BytesPayload struct { + Msg []byte + ContentType string +} + +func (s *BytesPayload) GetPayload() interface{} { + return s.Msg +} + +func (s *BytesPayload) GetContentType() string { + return s.ContentType +} + +func (s *BytesPayload) GetBytes() ([]byte, error) { + return s.Msg, nil +} diff --git a/executor/api/payload/metadata.go b/executor/api/payload/metadata.go new file mode 100644 index 0000000000..ef910a0119 --- /dev/null +++ b/executor/api/payload/metadata.go @@ -0,0 +1,21 @@ +package payload + +const ( + SeldonPUIDHeader = "Seldon-Puid" +) + +type MetaData struct { + Meta map[string][]string +} + +func NewFromMap(m map[string][]string) *MetaData { + meta := MetaData{ + Meta: map[string][]string{}, + } + for k, vv := range m { + for _, v := range vv { + meta.Meta[k] = append(meta.Meta[k], v) + } + } + return &meta +} diff --git a/executor/api/payload/metadata_test.go b/executor/api/payload/metadata_test.go new file mode 100644 index 0000000000..4ea5683109 --- /dev/null +++ b/executor/api/payload/metadata_test.go @@ -0,0 +1,16 @@ +package payload + +import ( + . 
"github.com/onsi/gomega" + "testing" +) + +func TestNewFromtMeta(t *testing.T) { + g := NewGomegaWithT(t) + + const k = "foo" + const v = "bar" + meta := NewFromMap(map[string][]string{k: []string{v}}) + + g.Expect(meta.Meta[k][0]).To(Equal(v)) +} diff --git a/executor/api/payload/payload.go b/executor/api/payload/payload.go new file mode 100644 index 0000000000..fd3b7a6ab7 --- /dev/null +++ b/executor/api/payload/payload.go @@ -0,0 +1,7 @@ +package payload + +type SeldonPayload interface { + GetPayload() interface{} + GetContentType() string + GetBytes() ([]byte, error) +} diff --git a/executor/api/payload/payload_test.go b/executor/api/payload/payload_test.go new file mode 100644 index 0000000000..2fd9d7a87a --- /dev/null +++ b/executor/api/payload/payload_test.go @@ -0,0 +1,23 @@ +package payload + +import ( + "github.com/golang/protobuf/jsonpb" + "github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto" + "gotest.tools/assert" + "testing" +) + +func TestGetPayload(t *testing.T) { + var sm proto.SeldonMessage + var data = `{"data":{"ndarray":[1.1,2]}}` + jsonpb.UnmarshalString(data, &sm) + + var sp SeldonPayload = &ProtoPayload{&sm} + + var sm2 *proto.SeldonMessage + sm2 = sp.GetPayload().(*proto.SeldonMessage) + + ma := jsonpb.Marshaler{} + msgStr, _ := ma.MarshalToString(sm2) + assert.Equal(t, data, msgStr) +} diff --git a/executor/api/payload/proto.go b/executor/api/payload/proto.go new file mode 100644 index 0000000000..736ee3879b --- /dev/null +++ b/executor/api/payload/proto.go @@ -0,0 +1,24 @@ +package payload + +import "github.com/golang/protobuf/proto" + +type ProtoPayload struct { + Msg proto.Message +} + +func (s *ProtoPayload) GetPayload() interface{} { + return s.Msg +} + +func (s *ProtoPayload) GetContentType() string { + return "application/protobuf" +} + +func (s *ProtoPayload) GetBytes() ([]byte, error) { + data, err := proto.Marshal(s.Msg) + if err != nil { + return nil, err + } else { + return data, nil + } +} diff --git 
a/executor/api/payload/proto_test.go b/executor/api/payload/proto_test.go new file mode 100644 index 0000000000..2ce1b724c3 --- /dev/null +++ b/executor/api/payload/proto_test.go @@ -0,0 +1,25 @@ +package payload + +import ( + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" + . "github.com/onsi/gomega" + seldon "github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto" + "testing" +) + +func TestGetProtoPayload(t *testing.T) { + g := NewGomegaWithT(t) + var sm seldon.SeldonMessage + var data = `{"data":{"ndarray":[1.1,2]}}` + jsonpb.UnmarshalString(data, &sm) + + payload := ProtoPayload{Msg: &sm} + b, err := payload.GetBytes() + g.Expect(err).Should(BeNil()) + var sm2 seldon.SeldonMessage + proto.Unmarshal(b, &sm2) + + g.Expect(proto.Equal(&sm2, &sm)).Should(Equal(true)) + +} diff --git a/executor/api/rest/client.go b/executor/api/rest/client.go new file mode 100644 index 0000000000..817f080598 --- /dev/null +++ b/executor/api/rest/client.go @@ -0,0 +1,288 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/go-logr/logr" + "github.com/golang/protobuf/jsonpb" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/seldonio/seldon-core/executor/api" + "github.com/seldonio/seldon-core/executor/api/client" + "github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto" + "github.com/seldonio/seldon-core/executor/api/metric" + "github.com/seldonio/seldon-core/executor/api/payload" + "github.com/seldonio/seldon-core/executor/api/util" + v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + "strconv" + "strings" +) + +const ( + ContentTypeJSON = "application/json" +) + +type JSONRestClient struct { + httpClient *http.Client + Log 
logr.Logger + Protocol string + DeploymentName string + predictor *v1.PredictorSpec + metrics *metric.ClientMetrics +} + +func (smc *JSONRestClient) CreateErrorPayload(err error) payload.SeldonPayload { + respFailed := proto.SeldonMessage{Status: &proto.Status{Code: http.StatusInternalServerError, Info: err.Error()}} + m := jsonpb.Marshaler{} + jStr, _ := m.MarshalToString(&respFailed) + res := payload.BytesPayload{Msg: []byte(jStr)} + return &res +} + +func (smc *JSONRestClient) Marshall(w io.Writer, msg payload.SeldonPayload) error { + _, err := w.Write(msg.GetPayload().([]byte)) + return err +} + +func (smc *JSONRestClient) Unmarshall(msg []byte) (payload.SeldonPayload, error) { + reqPayload := payload.BytesPayload{Msg: msg, ContentType: ContentTypeJSON} + return &reqPayload, nil +} + +type BytesRestClientOption func(client *JSONRestClient) + +func NewJSONRestClient(protocol string, deploymentName string, predictor *v1.PredictorSpec, options ...BytesRestClientOption) client.SeldonApiClient { + + client := JSONRestClient{ + http.DefaultClient, + logf.Log.WithName("JSONRestClient"), + protocol, + deploymentName, + predictor, + metric.NewClientMetrics(predictor, deploymentName, ""), + } + for i := range options { + options[i](&client) + } + + return &client +} + +func (smc *JSONRestClient) getMetricsRoundTripper(modelName string, service string) http.RoundTripper { + container := v1.GetContainerForPredictiveUnit(smc.predictor, modelName) + imageName := "" + imageVersion := "" + if container != nil { + imageParts := strings.Split(container.Image, ":") + imageName = imageParts[0] + if len(imageParts) == 2 { + imageVersion = imageParts[1] + } + } + return promhttp.InstrumentRoundTripperDuration(smc.metrics.ClientHandledHistogram.MustCurryWith(prometheus.Labels{ + metric.DeploymentNameMetric: smc.DeploymentName, + metric.PredictorNameMetric: smc.predictor.Name, + metric.PredictorVersionMetric: smc.predictor.Annotations["version"], + metric.ServiceMetric: service, + 
metric.ModelNameMetric: modelName, + metric.ModelImageMetric: imageName, + metric.ModelVersionMetric: imageVersion, + }), http.DefaultTransport) +} + +func (smc *JSONRestClient) addHeaders(req *http.Request, m map[string][]string) { + for k, vv := range m { + for _, v := range vv { + req.Header.Set(k, v) + } + } +} + +func (smc *JSONRestClient) doHttp(ctx context.Context, modelName string, method string, url *url.URL, msg []byte, meta map[string][]string) ([]byte, string, error) { + smc.Log.Info("Calling HTTP", "URL", url) + + var req *http.Request + var err error + if msg != nil { + smc.Log.Info("Building message") + req, err = http.NewRequest("POST", url.String(), bytes.NewBuffer(msg)) + if err != nil { + return nil, "", err + } + req.Header.Set("Content-Type", ContentTypeJSON) + } else { + req, err = http.NewRequest("GET", url.String(), nil) + if err != nil { + return nil, "", err + } + } + + // Add metadata passed in + smc.addHeaders(req, meta) + + if opentracing.IsGlobalTracerRegistered() { + tracer := opentracing.GlobalTracer() + + parentSpan := opentracing.SpanFromContext(ctx) + clientSpan := opentracing.StartSpan( + method, + opentracing.ChildOf(parentSpan.Context())) + defer clientSpan.Finish() + tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header)) + } + + client := smc.httpClient + client.Transport = smc.getMetricsRoundTripper(modelName, method) + + response, err := client.Do(req) + if err != nil { + return nil, "", err + } + + if response.StatusCode != http.StatusOK { + smc.Log.Info("httpPost failed", "response code", response.StatusCode) + return nil, "", errors.Errorf("Internal service call failed calling %s status code %d", url, response.StatusCode) + } + + //Read response + b, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, "", err + } + defer response.Body.Close() + + contentType := response.Header.Get("Content-Type") + + return b, contentType, nil +} + +func (smc 
*JSONRestClient) modifyMethod(method string, modelName string) string { + if smc.Protocol == api.ProtocolTensorflow { + switch method { + case client.SeldonPredictPath, client.SeldonTransformInputPath, client.SeldonTransformOutputPath: + return "/v1/models/" + modelName + ":predict" + case client.SeldonCombinePath: + return "/v1/models/" + modelName + ":aggregate" + case client.SeldonRoutePath: + return "/v1/models/" + modelName + ":route" + case client.SeldonFeedbackPath: + return "/v1/models/" + modelName + ":feedback" + case client.SeldonStatusPath: + return "/v1/models/" + modelName + case client.SeldonMetadataPath: + return "/v1/models/" + modelName + "/metadata" + } + } + return method +} + +func (smc *JSONRestClient) call(ctx context.Context, modelName string, method string, host string, port int32, req payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + url := url.URL{ + Scheme: "http", + Host: net.JoinHostPort(host, strconv.Itoa(int(port))), + Path: method, + } + var bytes []byte + if req != nil { + bytes = req.GetPayload().([]byte) + } + sm, contentType, err := smc.doHttp(ctx, modelName, method, &url, bytes, meta) + if err != nil { + return nil, err + } + res := payload.BytesPayload{Msg: sm, ContentType: contentType} + return &res, nil +} + +func (smc *JSONRestClient) Status(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + return smc.call(ctx, modelName, smc.modifyMethod(client.SeldonStatusPath, modelName), host, port, msg, meta) +} + +func (smc *JSONRestClient) Metadata(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + return smc.call(ctx, modelName, smc.modifyMethod(client.SeldonMetadataPath, modelName), host, port, msg, meta) +} + +func (smc *JSONRestClient) Chain(ctx context.Context, modelName string, msg 
payload.SeldonPayload) (payload.SeldonPayload, error) { + switch smc.Protocol { + case api.ProtocolSeldon: // Seldon Messages can always be chained together + return msg, nil + case api.ProtocolTensorflow: // Attempt to chain tensorflow payload + return ChainTensorflow(msg) + } + return nil, errors.Errorf("Unknown protocol %s", smc.Protocol) +} + +func (smc *JSONRestClient) Predict(ctx context.Context, modelName string, host string, port int32, req payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + return smc.call(ctx, modelName, smc.modifyMethod(client.SeldonPredictPath, modelName), host, port, req, meta) +} + +func (smc *JSONRestClient) TransformInput(ctx context.Context, modelName string, host string, port int32, req payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + return smc.call(ctx, modelName, smc.modifyMethod(client.SeldonTransformInputPath, modelName), host, port, req, meta) +} + +// Try to extract from SeldonMessage otherwise fall back to extract from Json Array +func (smc *JSONRestClient) Route(ctx context.Context, modelName string, host string, port int32, req payload.SeldonPayload, meta map[string][]string) (int, error) { + sp, err := smc.call(ctx, modelName, smc.modifyMethod(client.SeldonRoutePath, modelName), host, port, req, meta) + if err != nil { + return 0, err + } else { + var routes []int + msg := sp.GetPayload().([]byte) + + var sm proto.SeldonMessage + value := string(msg) + err := jsonpb.UnmarshalString(value, &sm) + if err == nil { + //Remove in future + routes = util.ExtractRouteFromSeldonMessage(&sm) + } else { + routes, err = ExtractRouteAsJsonArray(msg) + if err != nil { + return 0, err + } + } + + //Only returning first route. 
API could be extended to allow multiple routes + return routes[0], nil + } +} + +func isJSON(data []byte) bool { + var js json.RawMessage + return json.Unmarshal(data, &js) == nil +} + +func (smc *JSONRestClient) Combine(ctx context.Context, modelName string, host string, port int32, msgs []payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + // Extract into string array checking the data is JSON + strData := make([]string, len(msgs)) + for i, sm := range msgs { + if !isJSON(sm.GetPayload().([]byte)) { + return nil, fmt.Errorf("Data is not JSON") + } else { + strData[i] = string(sm.GetPayload().([]byte)) + } + } + // Create JSON list of messages + joined := strings.Join(strData, ",") + jStr := "[" + joined + "]" + req := payload.BytesPayload{Msg: []byte(jStr)} + return smc.call(ctx, modelName, smc.modifyMethod(client.SeldonCombinePath, modelName), host, port, &req, meta) +} + +func (smc *JSONRestClient) TransformOutput(ctx context.Context, modelName string, host string, port int32, req payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + return smc.call(ctx, modelName, smc.modifyMethod(client.SeldonTransformOutputPath, modelName), host, port, req, meta) +} + +func (smc *JSONRestClient) Feedback(ctx context.Context, modelName string, host string, port int32, req payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + return smc.call(ctx, modelName, smc.modifyMethod(client.SeldonFeedbackPath, modelName), host, port, req, meta) +} diff --git a/executor/api/rest/client_test.go b/executor/api/rest/client_test.go new file mode 100644 index 0000000000..878fb7d951 --- /dev/null +++ b/executor/api/rest/client_test.go @@ -0,0 +1,268 @@ +package rest + +import ( + "context" + "crypto/tls" + "github.com/golang/protobuf/jsonpb" + . 
"github.com/onsi/gomega" + "github.com/prometheus/client_golang/prometheus" + "github.com/seldonio/seldon-core/executor/api" + "github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto" + "github.com/seldonio/seldon-core/executor/api/metric" + "github.com/seldonio/seldon-core/executor/api/payload" + v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + v12 "k8s.io/api/core/v1" + v1meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "testing" +) + +const ( + okPredictResponse = `{ + "data": { + "names" : [ "a", "b" ], + "ndarray" : [[0.9,0.1]] + } + }` + okRouteResponse = `{ + "data": { + "ndarray" : [1] + } + }` + okStatusResponse = `{ + "status": "ok" + }` + okMetadataResponse = `{ + "metadata": { + "name":"mymodel" + } + }` +) + +func testingHTTPClient(g *GomegaWithT, handler http.Handler) (string, int, *http.Client, func()) { + s := httptest.NewServer(handler) + + cli := &http.Client{ + Transport: &http.Transport{ + DialContext: func(_ context.Context, network, _ string) (net.Conn, error) { + return net.Dial(network, s.Listener.Addr().String()) + }, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + + url, err := url.Parse(s.URL) + g.Expect(err).Should(BeNil()) + port, err := strconv.Atoi(url.Port()) + g.Expect(err).Should(BeNil()) + + return url.Hostname(), port, cli, s.Close +} + +func SetHTTPClient(httpClient *http.Client) BytesRestClientOption { + return func(cli *JSONRestClient) { + cli.httpClient = httpClient + } +} + +func createPayload(g *GomegaWithT) payload.SeldonPayload { + var data = ` {"data":{"ndarray":[1.1,2.0]}}` + return &payload.BytesPayload{Msg: []byte(data)} +} + +func createTestContext() context.Context { + ctx := context.Background() + ctx = context.WithValue(ctx, payload.SeldonPUIDHeader, "1") + return ctx +} + +func TestSimpleMethods(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + h := http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + w.Write([]byte(okPredictResponse)) + }) + host, port, httpClient, teardown := testingHTTPClient(g, h) + defer teardown() + predictor := v1.PredictorSpec{ + Name: "test", + Annotations: map[string]string{}, + } + seldonRestClient := NewJSONRestClient(api.ProtocolSeldon, "test", &predictor, SetHTTPClient(httpClient)) + + methods := []func(context.Context, string, string, int32, payload.SeldonPayload, map[string][]string) (payload.SeldonPayload, error){seldonRestClient.Predict, seldonRestClient.TransformInput, seldonRestClient.TransformOutput, seldonRestClient.Feedback} + for _, method := range methods { + resPayload, err := method(createTestContext(), "model", host, int32(port), createPayload(g), map[string][]string{}) + g.Expect(err).Should(BeNil()) + + data := resPayload.GetPayload().([]byte) + var smRes proto.SeldonMessage + err = jsonpb.UnmarshalString(string(data), &smRes) + g.Expect(err).Should(BeNil()) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetListValue().Values[0].GetNumberValue()).Should(Equal(0.9)) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetListValue().Values[1].GetNumberValue()).Should(Equal(0.1)) + } + +} + +func TestRouter(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(okRouteResponse)) + }) + host, port, httpClient, teardown := testingHTTPClient(g, h) + defer teardown() + predictor := v1.PredictorSpec{ + Name: "test", + Annotations: map[string]string{}, + } + seldonRestClient := NewJSONRestClient(api.ProtocolSeldon, "test", &predictor, SetHTTPClient(httpClient)) + + route, err := seldonRestClient.Route(createTestContext(), "model", host, int32(port), createPayload(g), map[string][]string{}) + g.Expect(err).Should(BeNil()) + + g.Expect(route).Should(Equal(1)) +} + +func TestStatus(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + h := http.HandlerFunc(func(w http.ResponseWriter, 
r *http.Request) { + w.Write([]byte(okStatusResponse)) + }) + host, port, httpClient, teardown := testingHTTPClient(g, h) + defer teardown() + predictor := v1.PredictorSpec{ + Name: "test", + Annotations: map[string]string{}, + } + seldonRestClient := NewJSONRestClient(api.ProtocolSeldon, "test", &predictor, SetHTTPClient(httpClient)) + + status, err := seldonRestClient.Status(createTestContext(), "model", host, int32(port), nil, map[string][]string{}) + g.Expect(err).Should(BeNil()) + data := string(status.GetPayload().([]byte)) + g.Expect(data).To(Equal(okStatusResponse)) +} + +func TestMetadata(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(okMetadataResponse)) + }) + host, port, httpClient, teardown := testingHTTPClient(g, h) + defer teardown() + predictor := v1.PredictorSpec{ + Name: "test", + Annotations: map[string]string{}, + } + seldonRestClient := NewJSONRestClient(api.ProtocolSeldon, "test", &predictor, SetHTTPClient(httpClient)) + + status, err := seldonRestClient.Metadata(createTestContext(), "model", host, int32(port), nil, map[string][]string{}) + g.Expect(err).Should(BeNil()) + data := string(status.GetPayload().([]byte)) + g.Expect(data).To(Equal(okMetadataResponse)) +} + +func createCombinerPayload(g *GomegaWithT) []payload.SeldonPayload { + var data = ` {"data":{"ndarray":[1.1,2.0]}}` + smp := []payload.SeldonPayload{&payload.BytesPayload{Msg: []byte(data)}, &payload.BytesPayload{Msg: []byte(data)}} + return smp +} + +func TestCombiner(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(okPredictResponse)) + }) + host, port, httpClient, teardown := testingHTTPClient(g, h) + defer teardown() + predictor := v1.PredictorSpec{ + Name: "test", + Annotations: map[string]string{}, + } + seldonRestClient := NewJSONRestClient(api.ProtocolSeldon, "test", 
&predictor, SetHTTPClient(httpClient)) + + resPayload, err := seldonRestClient.Combine(createTestContext(), "model", host, int32(port), createCombinerPayload(g), map[string][]string{}) + g.Expect(err).Should(BeNil()) + + data := resPayload.GetPayload().([]byte) + var smRes proto.SeldonMessage + err = jsonpb.UnmarshalString(string(data), &smRes) + g.Expect(err).Should(BeNil()) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetListValue().Values[0].GetNumberValue()).Should(Equal(0.9)) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetListValue().Values[1].GetNumberValue()).Should(Equal(0.1)) +} + +func TestClientMetrics(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(okPredictResponse)) + }) + host, port, httpClient, teardown := testingHTTPClient(g, h) + defer teardown() + predictor := v1.PredictorSpec{ + Name: "test", + Annotations: map[string]string{}, + ComponentSpecs: []*v1.SeldonPodSpec{ + &v1.SeldonPodSpec{ + Metadata: v1meta.ObjectMeta{}, + Spec: v12.PodSpec{ + Containers: []v12.Container{ + v12.Container{ + Name: "model", + Image: "foo:0.1", + }, + }, + }, + HpaSpec: nil, + }, + }, + } + seldonRestClient := NewJSONRestClient(api.ProtocolSeldon, "test", &predictor, SetHTTPClient(httpClient)) + + methods := []func(context.Context, string, string, int32, payload.SeldonPayload, map[string][]string) (payload.SeldonPayload, error){seldonRestClient.Predict, seldonRestClient.TransformInput, seldonRestClient.TransformOutput} + for _, method := range methods { + resPayload, err := method(createTestContext(), "model", host, int32(port), createPayload(g), map[string][]string{}) + g.Expect(err).Should(BeNil()) + + data := resPayload.GetPayload().([]byte) + var smRes proto.SeldonMessage + err = jsonpb.UnmarshalString(string(data), &smRes) + g.Expect(err).Should(BeNil()) + 
g.Expect(smRes.GetData().GetNdarray().Values[0].GetListValue().Values[0].GetNumberValue()).Should(Equal(0.9)) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetListValue().Values[1].GetNumberValue()).Should(Equal(0.1)) + + mfs, err := prometheus.DefaultGatherer.Gather() + g.Expect(err).Should(BeNil()) + found := false + foundImage := false + foundImageVersion := false + for _, mf := range mfs { + if mf.Name != nil && *mf.Name == metric.ClientRequestsMetricName { + for _, label := range mf.Metric[0].Label { + if *label.Name == metric.ModelImageMetric && *label.Value == "foo" { + foundImage = true + } + if *label.Name == metric.ModelVersionMetric && *label.Value == "0.1" { + foundImageVersion = true + } + } + found = true + } + } + g.Expect(found).Should(Equal(true)) + g.Expect(foundImage).Should(Equal(true)) + g.Expect(foundImageVersion).Should(Equal(true)) + } + +} diff --git a/executor/api/rest/constants.go b/executor/api/rest/constants.go new file mode 100644 index 0000000000..25af42d9a9 --- /dev/null +++ b/executor/api/rest/constants.go @@ -0,0 +1,9 @@ +package rest + +const ( + TracingPredictionsName = "predictions" + TracingStatusName = "status" + TracingMetadataName = "metadata" + + LoggingRestClientName = "RestClient" +) diff --git a/executor/api/rest/server.go b/executor/api/rest/server.go new file mode 100644 index 0000000000..37e38da5dc --- /dev/null +++ b/executor/api/rest/server.go @@ -0,0 +1,254 @@ +package rest + +import ( + "context" + "fmt" + "github.com/go-logr/logr" + guuid "github.com/google/uuid" + "github.com/gorilla/mux" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/seldonio/seldon-core/executor/api" + "github.com/seldonio/seldon-core/executor/api/client" + "github.com/seldonio/seldon-core/executor/api/metric" + "github.com/seldonio/seldon-core/executor/api/payload" + 
"github.com/seldonio/seldon-core/executor/predictor" + "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "io/ioutil" + "net/http" + "net/url" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" +) + +type SeldonRestApi struct { + Router *mux.Router + Client client.SeldonApiClient + predictor *v1.PredictorSpec + Log logr.Logger + ProbesOnly bool + ServerUrl *url.URL + Namespace string + Protocol string + DeploymentName string + metrics *metric.ServerMetrics + prometheusPath string +} + +func NewServerRestApi(predictor *v1.PredictorSpec, client client.SeldonApiClient, probesOnly bool, serverUrl *url.URL, namespace string, protocol string, deploymentName string, prometheusPath string) *SeldonRestApi { + var serverMetrics *metric.ServerMetrics + if !probesOnly { + serverMetrics = metric.NewServerMetrics(predictor, deploymentName) + } + return &SeldonRestApi{ + mux.NewRouter(), + client, + predictor, + logf.Log.WithName("SeldonRestApi"), + probesOnly, + serverUrl, + namespace, + protocol, + deploymentName, + serverMetrics, + prometheusPath, + } +} + +func (r *SeldonRestApi) respondWithSuccess(w http.ResponseWriter, code int, payload payload.SeldonPayload) { + w.Header().Set("Content-Type", payload.GetContentType()) + w.WriteHeader(code) + + err := r.Client.Marshall(w, payload) + if err != nil { + r.Log.Error(err, "Failed to write response") + } +} + +func (r *SeldonRestApi) respondWithError(w http.ResponseWriter, err error) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + + errPayload := r.Client.CreateErrorPayload(err) + err = r.Client.Marshall(w, errPayload) + if err != nil { + r.Log.Error(err, "Failed to write error payload") + } +} + +func (r *SeldonRestApi) wrapMetrics(service string, baseHandler http.HandlerFunc) http.HandlerFunc { + + handler := promhttp.InstrumentHandlerDuration( + r.metrics.ServerHandledHistogram.MustCurryWith(prometheus.Labels{ + metric.DeploymentNameMetric: 
r.DeploymentName, + metric.PredictorNameMetric: r.predictor.Name, + metric.PredictorVersionMetric: r.predictor.Annotations["version"], + metric.ServiceMetric: service}), + baseHandler, + ) + return handler +} + +func (r *SeldonRestApi) Initialise() { + r.Router.HandleFunc("/ready", r.checkReady) + r.Router.HandleFunc("/live", r.alive) + r.Router.Handle(r.prometheusPath, promhttp.Handler()) + if !r.ProbesOnly { + r.Router.Use(puidHeader) + switch r.Protocol { + case api.ProtocolSeldon: + //v0.1 API + api01 := r.Router.PathPrefix("/api/v0.1").Methods("POST").Subrouter() + api01.Handle("/predictions", r.wrapMetrics(metric.PredictionHttpServiceName, r.predictions)) + r.Router.NewRoute().Path("/api/v0.1/status/{" + ModelHttpPathVariable + "}").Methods("GET").HandlerFunc(r.wrapMetrics(metric.StatusHttpServiceName, r.status)) + r.Router.NewRoute().Path("/api/v0.1/metadata/{" + ModelHttpPathVariable + "}").Methods("GET").HandlerFunc(r.wrapMetrics(metric.StatusHttpServiceName, r.metadata)) + //v1.0 API + api1 := r.Router.PathPrefix("/api/v1.0").Methods("POST").Subrouter() + api1.Handle("/predictions", r.wrapMetrics(metric.PredictionServiceMetricName, r.predictions)) + r.Router.NewRoute().Path("/api/v1.0/status/{" + ModelHttpPathVariable + "}").Methods("GET").HandlerFunc(r.wrapMetrics(metric.StatusHttpServiceName, r.status)) + r.Router.NewRoute().Path("/api/v1.0/metadata/{" + ModelHttpPathVariable + "}").Methods("GET").HandlerFunc(r.wrapMetrics(metric.StatusHttpServiceName, r.metadata)) + + case api.ProtocolTensorflow: + r.Router.NewRoute().Path("/v1/models/{" + ModelHttpPathVariable + "}/:predict").Methods("POST").HandlerFunc(r.wrapMetrics(metric.PredictionHttpServiceName, r.predictions)) + r.Router.NewRoute().Path("/v1/models/{" + ModelHttpPathVariable + "}").Methods("GET").HandlerFunc(r.wrapMetrics(metric.StatusHttpServiceName, r.status)) + r.Router.NewRoute().Path("/v1/models/{" + ModelHttpPathVariable + 
"}/metadata").Methods("GET").HandlerFunc(r.wrapMetrics(metric.MetadataHttpServiceName, r.metadata)) + } + } +} + +func puidHeader(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if puid := r.Header.Get(payload.SeldonPUIDHeader); puid == "" { + r.Header.Set(payload.SeldonPUIDHeader, guuid.New().String()) + } + next.ServeHTTP(w, r) + }) +} + +func (r *SeldonRestApi) checkReady(w http.ResponseWriter, req *http.Request) { + err := predictor.Ready(r.predictor.Graph) + if err != nil { + r.Log.Error(err, "Ready check failed") + w.WriteHeader(http.StatusServiceUnavailable) + } else { + w.WriteHeader(http.StatusOK) + } +} + +func (r *SeldonRestApi) alive(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusOK) +} + +func (r *SeldonRestApi) failWithError(w http.ResponseWriter, err error) { + r.Log.Error(err, "Failed") + r.respondWithError(w, err) +} + +func getGraphNodeForModelName(req *http.Request, graph *v1.PredictiveUnit) (*v1.PredictiveUnit, error) { + vars := mux.Vars(req) + modelName := vars[ModelHttpPathVariable] + if graphNode := v1.GetPredictiveUnit(graph, modelName); graphNode == nil { + return nil, fmt.Errorf("Failed to find model %s", modelName) + } else { + return graphNode, nil + } +} + +func setupTracing(ctx context.Context, req *http.Request, spanName string) (context.Context, opentracing.Span) { + tracer := opentracing.GlobalTracer() + spanCtx, _ := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header)) + serverSpan := tracer.StartSpan(spanName, ext.RPCServerOption(spanCtx)) + ctx = opentracing.ContextWithSpan(ctx, serverSpan) + return ctx, serverSpan +} + +func (r *SeldonRestApi) metadata(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + + // Apply tracing if active + if opentracing.IsGlobalTracerRegistered() { + var serverSpan opentracing.Span + ctx, serverSpan = setupTracing(ctx, req, TracingMetadataName) + defer 
serverSpan.Finish() + } + + vars := mux.Vars(req) + modelName := vars[ModelHttpPathVariable] + + seldonPredictorProcess := predictor.NewPredictorProcess(ctx, r.Client, logf.Log.WithName(LoggingRestClientName), r.ServerUrl, r.Namespace, req.Header) + resPayload, err := seldonPredictorProcess.Metadata(r.predictor.Graph, modelName, nil) + if err != nil { + r.failWithError(w, err) + return + } + r.respondWithSuccess(w, http.StatusOK, resPayload) +} + +func (r *SeldonRestApi) status(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + + // Apply tracing if active + if opentracing.IsGlobalTracerRegistered() { + var serverSpan opentracing.Span + ctx, serverSpan = setupTracing(ctx, req, TracingStatusName) + defer serverSpan.Finish() + } + + vars := mux.Vars(req) + modelName := vars[ModelHttpPathVariable] + + seldonPredictorProcess := predictor.NewPredictorProcess(ctx, r.Client, logf.Log.WithName(LoggingRestClientName), r.ServerUrl, r.Namespace, req.Header) + resPayload, err := seldonPredictorProcess.Status(r.predictor.Graph, modelName, nil) + if err != nil { + r.failWithError(w, err) + return + } + r.respondWithSuccess(w, http.StatusOK, resPayload) +} + +func (r *SeldonRestApi) predictions(w http.ResponseWriter, req *http.Request) { + r.Log.Info("Predictions called") + + ctx := req.Context() + // Add Seldon Puid to Context + ctx = context.WithValue(ctx, payload.SeldonPUIDHeader, req.Header.Get(payload.SeldonPUIDHeader)) + + // Apply tracing if active + if opentracing.IsGlobalTracerRegistered() { + var serverSpan opentracing.Span + ctx, serverSpan = setupTracing(ctx, req, TracingPredictionsName) + defer serverSpan.Finish() + } + + bodyBytes, err := ioutil.ReadAll(req.Body) + if err != nil { + r.failWithError(w, err) + return + } + + seldonPredictorProcess := predictor.NewPredictorProcess(ctx, r.Client, logf.Log.WithName(LoggingRestClientName), r.ServerUrl, r.Namespace, req.Header) + + reqPayload, err := seldonPredictorProcess.Client.Unmarshall(bodyBytes) + 
if err != nil { + r.failWithError(w, err) + return + } + + var graphNode *v1.PredictiveUnit + if r.Protocol == api.ProtocolTensorflow { + graphNode, err = getGraphNodeForModelName(req, r.predictor.Graph) + if err != nil { + r.failWithError(w, err) + return + } + } else { + graphNode = r.predictor.Graph + } + resPayload, err := seldonPredictorProcess.Predict(graphNode, reqPayload) + if err != nil { + r.failWithError(w, err) + return + } + r.respondWithSuccess(w, http.StatusOK, resPayload) +} diff --git a/executor/api/rest/server_test.go b/executor/api/rest/server_test.go new file mode 100644 index 0000000000..fa3b23de1f --- /dev/null +++ b/executor/api/rest/server_test.go @@ -0,0 +1,268 @@ +package rest + +import ( + . "github.com/onsi/gomega" + "github.com/prometheus/common/expfmt" + "github.com/seldonio/seldon-core/executor/api" + "github.com/seldonio/seldon-core/executor/api/metric" + "github.com/seldonio/seldon-core/executor/api/payload" + "github.com/seldonio/seldon-core/executor/api/test" + v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" +) + +const ( + TestSeldonPuid = "1" +) + +func TestAliveEndpoint(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + url, _ := url.Parse("http://localhost") + r := NewServerRestApi(nil, nil, true, url, "default", api.ProtocolSeldon, "test", "/metrics") + r.Initialise() + + req, _ := http.NewRequest("GET", "/live", nil) + res := httptest.NewRecorder() + r.Router.ServeHTTP(res, req) + + g.Expect(res.Code).To(Equal(200)) +} + +func TestSimpleModel(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + model := v1.MODEL + p := v1.PredictorSpec{ + Name: "p", + Graph: &v1.PredictiveUnit{ + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + }, + } + + url, _ := url.Parse("http://localhost") + r := NewServerRestApi(&p, 
test.NewSeldonMessageTestClient(t, 0, nil, nil), false, url, "default", api.ProtocolSeldon, "test", "/metrics") + r.Initialise() + var data = ` {"data":{"ndarray":[1.1,2.0]}}` + + req, _ := http.NewRequest("POST", "/api/v0.1/predictions", strings.NewReader(data)) + req.Header = map[string][]string{"Content-Type": []string{"application/json"}} + res := httptest.NewRecorder() + r.Router.ServeHTTP(res, req) + g.Expect(res.Code).To(Equal(200)) +} + +func TestModelWithServer(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + called := false + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + bodyBytes, err := ioutil.ReadAll(r.Body) + g.Expect(err).To(BeNil()) + g.Expect(r.Header.Get(payload.SeldonPUIDHeader)).To(Equal(TestSeldonPuid)) + called = true + w.Write([]byte(bodyBytes)) + }) + server := httptest.NewServer(handler) + defer server.Close() + url, err := url.Parse(server.URL) + g.Expect(err).Should(BeNil()) + urlParts := strings.Split(url.Host, ":") + port, err := strconv.Atoi(urlParts[1]) + g.Expect(err).Should(BeNil()) + + model := v1.MODEL + p := v1.PredictorSpec{ + Name: "p", + Graph: &v1.PredictiveUnit{ + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: urlParts[0], + ServicePort: int32(port), + Type: v1.REST, + }, + }, + } + + r := NewServerRestApi(&p, NewJSONRestClient(api.ProtocolSeldon, "dep", &p), false, url, "default", api.ProtocolSeldon, "test", "/metrics") + r.Initialise() + var data = ` {"data":{"ndarray":[1.1,2.0]}}` + + req, _ := http.NewRequest("POST", "/api/v0.1/predictions", strings.NewReader(data)) + req.Header = map[string][]string{"Content-Type": []string{"application/json"}, payload.SeldonPUIDHeader: []string{TestSeldonPuid}} + res := httptest.NewRecorder() + r.Router.ServeHTTP(res, req) + g.Expect(res.Code).To(Equal(200)) + g.Expect(called).To(Equal(true)) + +} + +func TestServerMetrics(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + model := v1.MODEL + p := v1.PredictorSpec{ + 
Name: "p", + Graph: &v1.PredictiveUnit{ + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + }, + } + + url, _ := url.Parse("http://localhost") + r := NewServerRestApi(&p, test.NewSeldonMessageTestClient(t, 0, nil, nil), false, url, "default", api.ProtocolSeldon, "test", "/metrics") + r.Initialise() + + var data = ` {"data":{"ndarray":[1.1,2.0]}}` + + req, _ := http.NewRequest("POST", "/api/v0.1/predictions", strings.NewReader(data)) + req.Header = map[string][]string{"Content-Type": []string{"application/json"}} + res := httptest.NewRecorder() + r.Router.ServeHTTP(res, req) + g.Expect(res.Code).To(Equal(200)) + + req, _ = http.NewRequest("GET", "/metrics", nil) + res = httptest.NewRecorder() + r.Router.ServeHTTP(res, req) + g.Expect(res.Code).To(Equal(200)) + tp := expfmt.TextParser{} + metrics, err := tp.TextToMetricFamilies(res.Body) + g.Expect(err).Should(BeNil()) + g.Expect(metrics[metric.ServerRequestsMetricName]).ShouldNot(BeNil()) + +} + +func TestTensorflowStatus(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + model := v1.MODEL + p := v1.PredictorSpec{ + Name: "p", + Graph: &v1.PredictiveUnit{ + Name: "mymodel", + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + }, + } + + url, _ := url.Parse("http://localhost") + r := NewServerRestApi(&p, test.NewSeldonMessageTestClient(t, 0, nil, nil), false, url, "default", api.ProtocolTensorflow, "test", "/metrics") + r.Initialise() + + req, _ := http.NewRequest("GET", "/v1/models/mymodel", nil) + res := httptest.NewRecorder() + r.Router.ServeHTTP(res, req) + g.Expect(res.Code).To(Equal(200)) + g.Expect(res.Body.String()).To(Equal(test.TestClientStatusResponse)) +} + +func TestSeldonStatus(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + model := v1.MODEL + p := v1.PredictorSpec{ + Name: "p", + Graph: &v1.PredictiveUnit{ + Name: "mymodel", + Type: &model, + Endpoint: 
&v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + }, + } + + url, _ := url.Parse("http://localhost") + r := NewServerRestApi(&p, test.NewSeldonMessageTestClient(t, 0, nil, nil), false, url, "default", api.ProtocolSeldon, "test", "/metrics") + r.Initialise() + + req, _ := http.NewRequest("GET", "/api/v1.0/status/mymodel", nil) + res := httptest.NewRecorder() + r.Router.ServeHTTP(res, req) + g.Expect(res.Code).To(Equal(200)) + g.Expect(res.Body.String()).To(Equal(test.TestClientStatusResponse)) +} + +func TestSeldonMetadata(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + model := v1.MODEL + p := v1.PredictorSpec{ + Name: "p", + Graph: &v1.PredictiveUnit{ + Name: "mymodel", + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + }, + } + + url, _ := url.Parse("http://localhost") + r := NewServerRestApi(&p, test.NewSeldonMessageTestClient(t, 0, nil, nil), false, url, "default", api.ProtocolSeldon, "test", "/metrics") + r.Initialise() + + req, _ := http.NewRequest("GET", "/api/v1.0/metadata/mymodel", nil) + res := httptest.NewRecorder() + r.Router.ServeHTTP(res, req) + g.Expect(res.Code).To(Equal(200)) + g.Expect(res.Body.String()).To(Equal(test.TestClientMetadataResponse)) +} + +func TestTensorflowMetadata(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + + model := v1.MODEL + p := v1.PredictorSpec{ + Name: "p", + Graph: &v1.PredictiveUnit{ + Name: "mymodel", + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + }, + } + + url, _ := url.Parse("http://localhost") + r := NewServerRestApi(&p, test.NewSeldonMessageTestClient(t, 0, nil, nil), false, url, "default", api.ProtocolTensorflow, "test", "/metrics") + r.Initialise() + + req, _ := http.NewRequest("GET", "/v1/models/mymodel/metadata", nil) + res := httptest.NewRecorder() + r.Router.ServeHTTP(res, req) + g.Expect(res.Code).To(Equal(200)) + 
g.Expect(res.Body.String()).To(Equal(test.TestClientMetadataResponse)) +} diff --git a/executor/api/rest/tensorflow.go b/executor/api/rest/tensorflow.go new file mode 100644 index 0000000000..c50a5b7812 --- /dev/null +++ b/executor/api/rest/tensorflow.go @@ -0,0 +1,35 @@ +package rest + +import ( + "encoding/json" + "github.com/pkg/errors" + "github.com/seldonio/seldon-core/executor/api/payload" +) + +const ( + ModelHttpPathVariable = "model" +) + +func ChainTensorflow(msg payload.SeldonPayload) (payload.SeldonPayload, error) { + var f interface{} + err := json.Unmarshal(msg.GetPayload().([]byte), &f) + if err != nil { + return nil, err + } + m := f.(map[string]interface{}) + if _, ok := m["instances"]; ok { + return msg, nil + } else if _, ok := m["predictions"]; ok { + m["instances"] = m["predictions"] + delete(m, "predictions") + b, err := json.Marshal(m) + if err != nil { + return nil, err + } else { + p := payload.BytesPayload{Msg: b} + return &p, nil + } + } else { + return nil, errors.Errorf("Failed to convert tensorflow response so it could be chained to new input") + } +} diff --git a/executor/api/rest/utils.go b/executor/api/rest/utils.go new file mode 100644 index 0000000000..5353454dee --- /dev/null +++ b/executor/api/rest/utils.go @@ -0,0 +1,16 @@ +package rest + +import ( + "encoding/json" +) + +// Assumes the byte array is a json list of ints +func ExtractRouteAsJsonArray(msg []byte) ([]int, error) { + var routes []int + err := json.Unmarshal(msg, &routes) + if err == nil { + return routes, err + } else { + return nil, err + } +} diff --git a/executor/api/rest/utils_test.go b/executor/api/rest/utils_test.go new file mode 100644 index 0000000000..dca06b5f76 --- /dev/null +++ b/executor/api/rest/utils_test.go @@ -0,0 +1,15 @@ +package rest + +import ( + . 
"github.com/onsi/gomega" + "testing" +) + +func TestConversions(t *testing.T) { + g := NewGomegaWithT(t) + val := "[1,2]" + arr, err := ExtractRouteAsJsonArray([]byte(val)) + g.Expect(err).Should(BeNil()) + g.Expect(arr[0]).Should(Equal(1)) + g.Expect(arr[1]).Should(Equal(2)) +} diff --git a/executor/api/test/seldonmessage_test_client.go b/executor/api/test/seldonmessage_test_client.go new file mode 100644 index 0000000000..9563893002 --- /dev/null +++ b/executor/api/test/seldonmessage_test_client.go @@ -0,0 +1,102 @@ +package test + +import ( + "context" + "github.com/seldonio/seldon-core/executor/api/client" + "github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto" + "github.com/seldonio/seldon-core/executor/api/payload" + "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "io" + "net/http" + "testing" +) + +type SeldonMessageTestClient struct { + t *testing.T + chosenRoute int + errMethod *v1.PredictiveUnitMethod + err error +} + +const ( + TestClientStatusResponse = `{"status":"ok"}` + TestClientMetadataResponse = `{"metadata":{"name":"mymodel"}}` +) + +func (s SeldonMessageTestClient) Status(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + return &payload.BytesPayload{Msg: []byte(TestClientStatusResponse)}, nil +} + +func (s SeldonMessageTestClient) Metadata(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + return &payload.BytesPayload{Msg: []byte(TestClientMetadataResponse)}, nil +} + +func (s SeldonMessageTestClient) Chain(ctx context.Context, modelName string, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + return msg, nil +} + +func (s SeldonMessageTestClient) Unmarshall(msg []byte) (payload.SeldonPayload, error) { + reqPayload := payload.BytesPayload{Msg: msg, ContentType: "application/json"} + return &reqPayload, 
nil +} + +func (s SeldonMessageTestClient) Marshall(out io.Writer, msg payload.SeldonPayload) error { + _, err := out.Write(msg.GetPayload().([]byte)) + return err +} + +func (s SeldonMessageTestClient) CreateErrorPayload(err error) payload.SeldonPayload { + respFailed := proto.SeldonMessage{Status: &proto.Status{Code: http.StatusInternalServerError, Info: err.Error()}} + res := payload.ProtoPayload{Msg: &respFailed} + return &res +} + +func (s SeldonMessageTestClient) Predict(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + s.t.Logf("Predict %s %d", host, port) + if s.errMethod != nil && *s.errMethod == v1.TRANSFORM_INPUT { + return nil, s.err + } + return msg, nil +} + +func (s SeldonMessageTestClient) TransformInput(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + s.t.Logf("TransformInput %s %d", host, port) + if s.errMethod != nil && *s.errMethod == v1.TRANSFORM_INPUT { + return nil, s.err + } + return msg, nil +} + +func (s SeldonMessageTestClient) Route(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (int, error) { + s.t.Logf("Route %s %d", host, port) + return s.chosenRoute, nil +} + +func (s SeldonMessageTestClient) Combine(ctx context.Context, modelName string, host string, port int32, msgs []payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + s.t.Logf("Combine %s %d", host, port) + return msgs[0], nil +} + +func (s SeldonMessageTestClient) TransformOutput(ctx context.Context, modelName string, host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + s.t.Logf("TransformOutput %s %d", host, port) + return msg, nil +} + +func (s SeldonMessageTestClient) Feedback(ctx context.Context, modelName string, 
host string, port int32, msg payload.SeldonPayload, meta map[string][]string) (payload.SeldonPayload, error) { + s.t.Logf("Feedback %s %d", host, port) + if s.errMethod != nil && *s.errMethod == v1.SEND_FEEDBACK { + return nil, s.err + } + resp := &payload.ProtoPayload{Msg: msg.GetPayload().(*proto.Feedback).Request} + return resp, nil +} + +func NewSeldonMessageTestClient(t *testing.T, chosenRoute int, errMethod *v1.PredictiveUnitMethod, err error) client.SeldonApiClient { + client := SeldonMessageTestClient{ + t: t, + chosenRoute: chosenRoute, + errMethod: errMethod, + err: err, + } + return &client +} diff --git a/executor/api/util/utils.go b/executor/api/util/utils.go new file mode 100644 index 0000000000..9309d65e02 --- /dev/null +++ b/executor/api/util/utils.go @@ -0,0 +1,32 @@ +package util + +import ( + "github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto" +) + +func ExtractRouteFromSeldonMessage(msg *proto.SeldonMessage) []int { + switch msg.GetData().DataOneof.(type) { + case *proto.DefaultData_Ndarray: + values := msg.GetData().GetNdarray().GetValues() + routeArr := make([]int, len(values)) + for i, value := range values { + routeArr[i] = int(value.GetNumberValue()) + } + return routeArr + case *proto.DefaultData_Tensor: + values := msg.GetData().GetTensor().Values + routeArr := make([]int, len(values)) + for i, value := range values { + routeArr[i] = int(value) + } + return routeArr + case *proto.DefaultData_Tftensor: + values := msg.GetData().GetTftensor().GetIntVal() + routeArr := make([]int, len(values)) + for i, value := range values { + routeArr[i] = int(value) + } + return routeArr + } + return []int{-1} +} diff --git a/executor/go.mod b/executor/go.mod new file mode 100644 index 0000000000..6eefe7b941 --- /dev/null +++ b/executor/go.mod @@ -0,0 +1,35 @@ +module github.com/seldonio/seldon-core/executor + +go 1.12 + +require ( + github.com/cloudevents/sdk-go v0.10.2 + github.com/ghodss/yaml v1.0.0 + github.com/go-logr/logr v0.1.0 + 
github.com/go-stack/stack v1.8.0 // indirect + github.com/golang/protobuf v1.3.2 + github.com/google/uuid v1.1.1 + github.com/gorilla/mux v1.7.3 + github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 + github.com/onsi/gomega v1.7.0 + github.com/opentracing/opentracing-go v1.1.0 + github.com/pkg/errors v0.8.1 + github.com/prometheus/client_golang v1.3.0 + github.com/prometheus/common v0.7.0 + github.com/seldonio/seldon-core/operator v0.0.0-20191223100430-3b372610589b + github.com/tensorflow/tensorflow v1.14.0 // indirect + github.com/tensorflow/tensorflow/tensorflow/go/core v0.0.0-00010101000000-000000000000 + github.com/uber/jaeger-client-go v2.21.1+incompatible + github.com/uber/jaeger-lib v2.2.0+incompatible // indirect + golang.org/x/net v0.0.0-20190926025831-c00fd9afed17 // indirect + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 + google.golang.org/grpc v1.24.0 + gotest.tools v2.2.0+incompatible + k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b + k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d + k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible + sigs.k8s.io/controller-runtime v0.2.2 +) + +replace github.com/tensorflow/tensorflow/tensorflow/go/core => ./proto/tensorflow/core diff --git a/executor/go.sum b/executor/go.sum new file mode 100644 index 0000000000..e70e4053f3 --- /dev/null +++ b/executor/go.sum @@ -0,0 +1,461 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.40.0/go.mod h1:Tk58MuI9rbLMKlAjeO/bDnteAx7tX2gJIXw4T5Jwlro= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= 
+contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= +github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest/autorest v0.2.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod 
h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudevents/sdk-go v0.10.2 h1:CAqHqDHmBkCG4OUeUBt7q2Ql8KV25U+bgPUtlcJelZ4= +github.com/cloudevents/sdk-go v0.10.2/go.mod h1:EHG6NmU3XkIeuueER6+vbnhYfWlgVlfUQVzPC+UK7ao= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fortytw2/leaktest v1.3.0/go.mod 
h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= +github.com/gogo/protobuf v1.1.1/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp 
v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/gophercloud/gophercloud v0.4.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= 
+github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8 
h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= +github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common 
v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y= +github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/seldonio/seldon-core v0.4.1 
h1:CeesBeUYpvDY7/nr7OahxUnNM++g5b7uulhiWlqdUVE= +github.com/seldonio/seldon-core v1.0.0 h1:eJCF4tYqS4uvFiMLpxKbRhKESfLU6BBK/yTGDaX6hXI= +github.com/seldonio/seldon-core/operator v0.0.0-20191220103229-cbbcc5aed888 h1:fr9rzUWXPsSPEFEXiTLkPVL3NNFQ0DeFOsIKtMUhzqI= +github.com/seldonio/seldon-core/operator v0.0.0-20191220103229-cbbcc5aed888/go.mod h1:Z1WNnJukibShiHq8+mOpLpIDygzbcqg2nA+pNq8vrW0= +github.com/seldonio/seldon-core/operator v0.0.0-20191223100430-3b372610589b h1:6TG2KvNQPoBkiCFbiXP15qY9LKPfsMwkB6Kz3hdpWsg= +github.com/seldonio/seldon-core/operator v0.0.0-20191223100430-3b372610589b/go.mod h1:Z1WNnJukibShiHq8+mOpLpIDygzbcqg2nA+pNq8vrW0= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tensorflow/tensorflow v1.14.0 h1:g0W2+f/RybcvmrTjPLTwXkfr/BsDGUd8FKT6ZzojOMo= +github.com/tensorflow/tensorflow v1.14.0/go.mod h1:itOSERT4trABok4UOoG+X4BoKds9F3rIsySdn+Lvu90= +github.com/uber/jaeger-client-go v2.21.1+incompatible h1:oozboeZmWz+tyh3VZttJWlF3K73mHgbokieceqKccLo= +github.com/uber/jaeger-client-go v2.21.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= 
+golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190926025831-c00fd9afed17 h1:qPnAdmjNA41t3QBTx2mFGf/SD1IoslhYu7AmdsVzCcs= +golang.org/x/net v0.0.0-20190926025831-c00fd9afed17/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 
h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190621203818-d432491b9138 h1:t8BZD9RDjkm9/h7yYN6kE8oaeov5r9aztkB7zKA5Tkg= +golang.org/x/sys v0.0.0-20190621203818-d432491b9138/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e h1:D5TXcfTk7xF7hvieo4QErS3qqCB4teTffacDWr7CI+0= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f h1:68K/z8GLUxV76xGSqwTWw2gyk/jwn79LUL43rES2g8o= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod 
h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= +google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 h1:wuGevabY6r+ivPNagjUXGGxF+GqgMd+dBhjsxW4q9u4= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= +k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= +k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c= +k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= 
+k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 h1:5sW+fEHvlJI3Ngolx30CmubFulwH28DhKjGf70Xmtco= +k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +knative.dev/pkg v0.0.0-20190823221514-39a29cf1bf26 h1:Sz/R5k+PufXlGqMp8Z/BKvENj+XIeaQc68TpMPGhKxo= +knative.dev/pkg v0.0.0-20190823221514-39a29cf1bf26/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q= +pack.ag/amqp v0.11.0/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/controller-runtime v0.2.0/go.mod h1:ZHqrRDZi3f6BzONcvlUxkqCKgwasGk5FZrnSv9TVZF4= +sigs.k8s.io/controller-runtime v0.2.2 h1:JT/vJJhUjjL9NZNwnm8AXmqCBUXSCFKmTaNjwDi28N0= +sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= +sigs.k8s.io/controller-tools v0.2.0/go.mod h1:8t/X+FVWvk6TaBcsa+UKUBbn7GMtvyBKX30SGl4em6Y= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= +sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/executor/hack/boilerplate.go.txt b/executor/hack/boilerplate.go.txt new file mode 100644 index 0000000000..9428d73669 --- /dev/null +++ b/executor/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2019 The Seldon Authors + +Licensed under the Apache License, 
Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ diff --git a/executor/hack/tools/README.md b/executor/hack/tools/README.md new file mode 100644 index 0000000000..f12ab9100b --- /dev/null +++ b/executor/hack/tools/README.md @@ -0,0 +1,2 @@ +This directory contains a stub go module used to track version of development +tools like the Kubernetes code generators. \ No newline at end of file diff --git a/executor/hack/tools/go.mod b/executor/hack/tools/go.mod new file mode 100644 index 0000000000..8343ce9058 --- /dev/null +++ b/executor/hack/tools/go.mod @@ -0,0 +1,33 @@ +module sigs.k8s.io/kind/hack/tools + +go 1.13 + +require ( + github.com/golangci/golangci-lint v1.17.2-0.20190714145355-d2b1eea2c617 + github.com/spf13/pflag v1.0.3 // indirect + golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 // indirect + golang.org/x/tools v0.0.0-20190606174628-0139d5756a7d // indirect + k8s.io/code-generator v0.0.0-20190311093542-50b561225d70 + k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a // indirect + k8s.io/klog v0.3.0 // indirect +) + +// deal with golangci-lint being broken + +replace github.com/golangci/errcheck v0.0.0-20181003203344-ef45e06d44b6 => github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 + +replace github.com/golangci/go-tools v0.0.0-20180109140146-af6baa5dc196 => github.com/golangci/go-tools v0.0.0-20190318060251-af6baa5dc196 + +replace github.com/golangci/gofmt v0.0.0-20181105071733-0b8337e80d98 => github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98 + +replace github.com/golangci/gosec 
v0.0.0-20180901114220-66fb7fc33547 => github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547 + +replace github.com/golangci/lint-1 v0.0.0-20180610141402-ee948d087217 => github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217 + +replace github.com/go-critic/go-critic v0.0.0-20181204210945-1df300866540 => github.com/go-critic/go-critic v0.0.0-20190526074819-1df300866540 + +replace mvdan.cc/unparam v0.0.0-20190124213536-fbb59629db34 => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34 + +replace github.com/golangci/ineffassign v0.0.0-20180808204949-42439a7714cc => github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc + +replace github.com/timakin/bodyclose => github.com/golangci/bodyclose v0.0.0-20190714144026-65da19158fa2 diff --git a/executor/hack/tools/go.sum b/executor/hack/tools/go.sum new file mode 100644 index 0000000000..34552d53b2 --- /dev/null +++ b/executor/hack/tools/go.sum @@ -0,0 +1,235 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OpenPeeDeeP/depguard v0.0.0-20180806142446-a69c782687b2 h1:HTOmFEEYrWi4MW5ZKUx6xfeyM10Sx3kQF65xiQJMPYA= +github.com/OpenPeeDeeP/depguard v0.0.0-20180806142446-a69c782687b2/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= +github.com/OpenPeeDeeP/depguard v1.0.0 h1:k9QF73nrHT3nPLz3lu6G5s+3Hi8Je36ODr1F5gjAXXM= +github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.6.0 h1:66qjqZk8kalYAvDRtM1AdAJQI0tj4Wrue3Eq3B3pmFU= +github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-critic/go-critic v0.0.0-20190526074819-1df300866540 h1:aTxtKFxZ1TqCCYkrQE6Si8qIm/2+58VSJWurTgEPVHE= +github.com/go-critic/go-critic v0.0.0-20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= +github.com/go-lintpack/lintpack v0.5.2 h1:DI5mA3+eKdWeJ40nU4d6Wc26qmdG8RCi/btYq0TuRN0= +github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= +github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= +github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= 
+github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= +github.com/go-toolsmith/pkgload v1.0.0 h1:4DFWWMXVfbcN5So1sBNW9+yeiMqLFGl1wFLTL5R0Tgg= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0 h1:zKymWyA1TRYvqYrYDrfEMZULyrhcnGY3x7LDKU2XQaA= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/mock v1.0.0 h1:HzcpUG60pfl43n9d2qbdi/3l1uKpAmxlfWEPWtV/QxM= +github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golangci/bodyclose v0.0.0-20190714144026-65da19158fa2 h1:nh/PMhIaxu+h8NOuhOwT2el9Ed08166oitASyNYqQzs= +github.com/golangci/bodyclose v0.0.0-20190714144026-65da19158fa2/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a 
h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 h1:YYWNAGTKWhKpcLLt7aSj/odlKrSrelQwlovBpDuf19w= +github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/go-tools v0.0.0-20190318060251-af6baa5dc196 h1:Y8tnIoL0ZQEnVn+SedetVzw1JRsGvjnOemI+oTFCpow= +github.com/golangci/go-tools v0.0.0-20190318060251-af6baa5dc196/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3 h1:pe9JHs3cHHDQgOFXJJdYkK6fLz2PWyYtP4hthoCMvs8= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee h1:J2XAy40+7yz70uaOiMbNnluTg7gyQhtGqLQncQh+4J8= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98 h1:0OkFarm1Zy2CjCiDKfK9XHgmc2wbDlRMD2hD8anAJHU= +github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.17.1 h1:lc8Hf9GPCjIr0hg3S/xhvFT1+Hydass8F1xchr8jkME= +github.com/golangci/golangci-lint v1.17.1/go.mod h1:+5sJSl2h3aly+fpmL2meSP8CaSKua2E4Twi9LPy7b1g= +github.com/golangci/golangci-lint v1.17.2-0.20190714145355-d2b1eea2c617 h1:B+d1PF6W3O+8rgnp+vsuWpsVKfdCplAGFihfsee3ukc= +github.com/golangci/golangci-lint v1.17.2-0.20190714145355-d2b1eea2c617/go.mod h1:gR9aY959aWjiEg3Tw/+L9cGIGb4UysZwT6cComANQ0s= 
+github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547 h1:fUdgm/BdKvwOHxg5AhNbkNRp2mSy8sxTXyBVs/laQHo= +github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= +github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI= +github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217 h1:En/tZdwhAn0JNwLuXzP3k2RVtMqMmOEK7Yu/g3tmtJE= +github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770 h1:EL/O5HGrF7Jaq0yNhBLucz9hTuRzj2LdwGBOaENgxIk= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 h1:HVfrLniijszjS1aiNg8JbBMO2+E1WIQ+j/gL4SQqGPg= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 h1:JVnpOZS+qxli+rgVl98ILOXVNbW+kb5wcxeGx8ShUIw= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce h1:xdsDDbiBDQTKASoGEZ+pEmF1OnWuu8AQ9I8iNbHNeno= +github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 
+github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/magiconair/properties v1.7.6 h1:U+1DqNen04MdEPgFiIwdOUiqZ8qPa37xgogX/sd3+54= +github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238 h1:+MZW2uvHgN8kYvksEN3f7eFL2wpzk0GxmlFsMybWc7E= +github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663 h1:Ri1EhipkbhWsffPJ3IPlrb4SkTOPa2PfRXp3jchBczw= +github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo 
v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pelletier/go-toml v1.1.0 h1:cmiOvKzEunMsAxyhXSzpL5Q1CRKpVv0KQsnAIcSEVYM= +github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e h1:MZM7FHLqUHYI0Y/mQAt3d2aYa0SiNms/hFqC9qJYolM= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041 h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod 
h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I= +github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sourcegraph/go-diff v0.5.1 h1:gO6i5zugwzo1RVTvgvfwCOSVegNuvnNi6bAD1QCmkHs= +github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= +github.com/spf13/afero v1.1.0 h1:bopulORc2JeYaxfHLvJa5NzxviA9PoWhpiiJkru7Ji4= +github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.2.0 h1:HHl1DSRbEQN2i8tJmtS6ViPyHx35+p51amrdsiTCrkg= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cobra v0.0.2 h1:NfkwRbgViGoyjBKsLI0QMDcuMnhM+SBg3T0cGfpvKDE= +github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec h1:2ZXvIUGghLpdTVHR1UfvfrzoVlZaE/yOWC5LueIHZig= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.0.2 h1:Ncr3ZIuJn322w2k1qmzXDnkLAdQMlJqBa9kfAH+irso= +github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/timakin/bodyclose v0.0.0-20190407043127-4a873e97b2bb h1:lI9ufgFfvuqRctP9Ny8lDDLbSWCMxBPletcSqrnyFYM= +github.com/timakin/bodyclose v0.0.0-20190407043127-4a873e97b2bb/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/valyala/bytebufferpool 
v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606174628-0139d5756a7d h1:CoaGYJ9a8IXms8Q/NUeypLWbStIszTH0IIwqBUkEB9g= +golang.org/x/tools v0.0.0-20190606174628-0139d5756a7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
+gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/code-generator v0.0.0-20190311093542-50b561225d70 h1:lgPp615xLHxN84RBd+viA/oHzJfI0miFYFH4T9wpPQ4= +k8s.io/code-generator v0.0.0-20190311093542-50b561225d70/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= +k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a h1:QoHVuRquf80YZ+/bovwxoMO3Q/A3nt3yTgS0/0nejuk= +k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +mvdan.cc/interfacer 
v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34 h1:duVSyluuJA+u0BnkcLR01smoLrGgDTfWt5c8ODYG8fU= +mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/executor/hack/tools/tools.go b/executor/hack/tools/tools.go new file mode 100644 index 0000000000..cf969bf1c8 --- /dev/null +++ b/executor/hack/tools/tools.go @@ -0,0 +1,17 @@ +/* +Package tools is used to track binary dependencies with go modules +https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module +*/ +package tools + +// +build tools + +import ( + // linter(s) + _ "github.com/golangci/golangci-lint/cmd/golangci-lint" + + // kubernetes code generators + _ "k8s.io/code-generator/cmd/conversion-gen" + _ "k8s.io/code-generator/cmd/deepcopy-gen" + _ "k8s.io/code-generator/cmd/defaulter-gen" +) diff --git a/executor/hack/update_generated.sh b/executor/hack/update_generated.sh new file mode 100755 index 0000000000..e696def98e --- /dev/null +++ b/executor/hack/update_generated.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash +set -o nounset +set -o errexit +set -o pipefail +set -o xtrace + +PKG_BASE=github.com/seldonio/seldon-core/executor +REPO_ROOT="${REPO_ROOT:-$(git rev-parse --show-toplevel)}" +REPO_ROOT=${REPO_ROOT}/executor +cd "${REPO_ROOT}" + +# enable modules and the proxy cache +export 
GO111MODULE="on" + +# build the generators +BINDIR="${REPO_ROOT}/bin" +# use the tools module +cd "hack/tools" +go build -o "${BINDIR}/defaulter-gen" k8s.io/code-generator/cmd/defaulter-gen +go build -o "${BINDIR}/deepcopy-gen" k8s.io/code-generator/cmd/deepcopy-gen +go build -o "${BINDIR}/conversion-gen" k8s.io/code-generator/cmd/conversion-gen +go build -o "${BINDIR}/client-gen" k8s.io/code-generator/cmd/client-gen +go build -o "${BINDIR}/lister-gen" k8s.io/code-generator/cmd/lister-gen +go build -o "${BINDIR}/informer-gen" k8s.io/code-generator/cmd/informer-gen +# go back to the root +cd "${REPO_ROOT}" + + +# turn off module mode before running the generators +# https://github.com/kubernetes/code-generator/issues/69 +# we also need to populate vendor +go mod vendor +export GO111MODULE="off" + +# fake being in a gopath +FAKE_GOPATH="$(mktemp -d)" +#trap 'rm -rf ${FAKE_GOPATH}' EXIT + +FAKE_REPOPATH="${FAKE_GOPATH}/src/${PKG_BASE}" +mkdir -p "$(dirname "${FAKE_REPOPATH}")" && ln -s "${REPO_ROOT}" "${FAKE_REPOPATH}" + +export GOPATH="${FAKE_GOPATH}" +cd "${FAKE_REPOPATH}" + +# run the generators +#"${BINDIR}/deepcopy-gen" -v 9 -i ./api/v1alpha2/ -O zz_generated_new.deepcopy --go-header-file hack/boilerplate.go.txt + +OUTPUT_PKG=${FAKE_REPOPATH}/client + +"${BINDIR}/client-gen" -v 9 --input-base ${PKG_BASE}/api --clientset-name versioned -i ./api/machinelearning/v1alpha2/ --input machinelearning/v1alpha2 --output-package ${PKG_BASE}/client/clientset --go-header-file hack/boilerplate.go.txt -o ${FAKE_GOPATH}/src + +"${BINDIR}/lister-gen" -v 5 -i ${PKG_BASE}/api/machinelearning/v1alpha2 --output-package ${PKG_BASE}/client/listers --go-header-file hack/boilerplate.go.txt -o ${FAKE_GOPATH}/src + +"${BINDIR}/informer-gen" -v 5 \ + -i ${PKG_BASE}/api/machinelearning/v1alpha2 \ + --versioned-clientset-package "${PKG_BASE}/client/clientset/versioned" \ + --listers-package "${PKG_BASE}/client/listers" \ + --output-package ${PKG_BASE}/client/informers \ + --go-header-file 
hack/boilerplate.go.txt \ + -o ${FAKE_GOPATH}/src + +export GO111MODULE="on" +cd $REPO_ROOT + +# gofmt the tree +#find . -name "*.go" -type f -print0 | xargs -0 gofmt -s -w + diff --git a/executor/licenses/README.md b/executor/licenses/README.md new file mode 100644 index 0000000000..c3022b5cc1 --- /dev/null +++ b/executor/licenses/README.md @@ -0,0 +1 @@ +Follow https://github.com/kubeflow/testing/tree/master/py/kubeflow/testing/go-license-tools diff --git a/executor/licenses/additional_license_info.csv b/executor/licenses/additional_license_info.csv new file mode 100644 index 0000000000..8ab2ddf26b --- /dev/null +++ b/executor/licenses/additional_license_info.csv @@ -0,0 +1,26 @@ +https://github.com/googleapis/google-cloud-go/blob/master/LICENSE,Apache License 2.0 +https://github.com/ghodss/yaml/blob/master/LICENSE,MIT License +https://github.com/go-openapi/spec/blob/master/LICENSE,Apache License 2.0 +https://github.com/gogo/protobuf/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/gophercloud/gophercloud/blob/master/LICENSE,Apache License 2.0 +https://github.com/hpcloud/tail/blob/master/LICENSE.txt,MIT License +https://github.com/inconshreveable/mousetrap/blob/master/LICENSE,Apache License 2.0 +https://github.com/pmezard/go-difflib/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/uber-go/zap/blob/master/LICENSE.txt,MIT License +https://github.com/golang/crypto/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/golang/exp/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/golang/net/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/golang/sync/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/golang/sys/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/golang/text/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License 
+https://github.com/golang/time/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/golang/tools/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/golang/xerrors/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/googleapis/google-api-go-client/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/gotestyourself/gotest.tools/blob/master/LICENSE,Apache License 2.0 +https://github.com/go-check/check/blob/v1/LICENSE,BSD 2-Clause "Simplified" License +https://github.com/go-inf/inf/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/go-tomb/tomb/blob/v1/LICENSE,BSD 3-Clause "New" or "Revised" License +https://github.com/go-yaml/yaml/blob/v2/LICENSE,Apache License 2.0 +https://github.com/kubernetes-sigs/yaml/blob/master/LICENSE,MIT License +https://github.com/dominikh/go-tools/blob/master/LICENSE,MIT License diff --git a/executor/licenses/dep.txt b/executor/licenses/dep.txt new file mode 100644 index 0000000000..2a22d1aad9 --- /dev/null +++ b/executor/licenses/dep.txt @@ -0,0 +1,151 @@ +github.com/seldonio/seldon-core/executor +cloud.google.com/go +contrib.go.opencensus.io/exporter/prometheus +github.com/Azure/azure-sdk-for-go +github.com/Azure/go-autorest/autorest +github.com/Azure/go-autorest/autorest/adal +github.com/Azure/go-autorest/autorest/date +github.com/Azure/go-autorest/autorest/mocks +github.com/Azure/go-autorest/autorest/to +github.com/Azure/go-autorest/autorest/validation +github.com/Azure/go-autorest/logger +github.com/Azure/go-autorest/tracing +github.com/BurntSushi/toml +github.com/NYTimes/gziphandler +github.com/PuerkitoBio/purell +github.com/PuerkitoBio/urlesc +github.com/alecthomas/template +github.com/alecthomas/units +github.com/beorn7/perks +github.com/cespare/xxhash/v2 +github.com/client9/misspell +github.com/cloudevents/sdk-go +github.com/davecgh/go-spew +github.com/dgrijalva/jwt-go 
+github.com/docopt/docopt-go +github.com/emicklei/go-restful +github.com/evanphx/json-patch +github.com/fatih/color +github.com/fortytw2/leaktest +github.com/fsnotify/fsnotify +github.com/ghodss/yaml +github.com/go-kit/kit +github.com/go-logfmt/logfmt +github.com/go-logr/logr +github.com/go-logr/zapr +github.com/go-openapi/jsonpointer +github.com/go-openapi/jsonreference +github.com/go-openapi/spec +github.com/go-openapi/swag +github.com/go-stack/stack +github.com/gobuffalo/flect +github.com/gogo/protobuf +github.com/golang/glog +github.com/golang/groupcache +github.com/golang/mock +github.com/golang/protobuf +github.com/google/btree +github.com/google/go-cmp +github.com/google/gofuzz +github.com/google/martian +github.com/google/pprof +github.com/google/uuid +github.com/googleapis/gax-go/v2 +github.com/googleapis/gnostic +github.com/gophercloud/gophercloud +github.com/gorilla/mux +github.com/grpc-ecosystem/go-grpc-middleware +github.com/grpc-ecosystem/go-grpc-prometheus +github.com/hashicorp/golang-lru +github.com/hpcloud/tail +github.com/imdario/mergo +github.com/inconshreveable/mousetrap +github.com/json-iterator/go +github.com/jstemmer/go-junit-report +github.com/julienschmidt/httprouter +github.com/kelseyhightower/envconfig +github.com/kisielk/errcheck +github.com/kisielk/gotool +github.com/konsorten/go-windows-terminal-sequences +github.com/kr/logfmt +github.com/kr/pretty +github.com/kr/pty +github.com/kr/text +github.com/mailru/easyjson +github.com/mattn/go-colorable +github.com/mattn/go-isatty +github.com/matttproud/golang_protobuf_extensions +github.com/modern-go/concurrent +github.com/modern-go/reflect2 +github.com/munnerz/goautoneg +github.com/mwitkow/go-conntrack +github.com/nats-io/jwt +github.com/nats-io/nats-server/v2 +github.com/nats-io/nats.go +github.com/nats-io/nkeys +github.com/nats-io/nuid +github.com/onsi/ginkgo +github.com/onsi/gomega +github.com/opentracing/opentracing-go +github.com/pborman/uuid +github.com/pkg/errors 
+github.com/pmezard/go-difflib +github.com/prometheus/client_golang +github.com/prometheus/client_model +github.com/prometheus/common +github.com/prometheus/procfs +github.com/seldonio/seldon-core/operator +github.com/sirupsen/logrus +github.com/spf13/afero +github.com/spf13/cobra +github.com/spf13/pflag +github.com/stretchr/objx +github.com/stretchr/testify +github.com/tensorflow/tensorflow +github.com/tensorflow/tensorflow/tensorflow/go/core +github.com/uber/jaeger-client-go +github.com/uber/jaeger-lib +go.opencensus.io +go.uber.org/atomic +go.uber.org/multierr +go.uber.org/zap +golang.org/x/crypto +golang.org/x/exp +golang.org/x/lint +golang.org/x/net +golang.org/x/oauth2 +golang.org/x/sync +golang.org/x/sys +golang.org/x/text +golang.org/x/time +golang.org/x/tools +golang.org/x/xerrors +gomodules.xyz/jsonpatch/v2 +google.golang.org/api +google.golang.org/appengine +google.golang.org/genproto +google.golang.org/grpc +gopkg.in/alecthomas/kingpin.v2 +gopkg.in/check.v1 +gopkg.in/fsnotify.v1 +gopkg.in/inf.v0 +gopkg.in/tomb.v1 +gopkg.in/yaml.v2 +gotest.tools +honnef.co/go/tools +k8s.io/api +k8s.io/apiextensions-apiserver +k8s.io/apimachinery +k8s.io/client-go +k8s.io/gengo +k8s.io/klog +k8s.io/kube-openapi +k8s.io/utils +knative.dev/pkg +pack.ag/amqp +rsc.io/binaryregexp +sigs.k8s.io/controller-runtime +sigs.k8s.io/controller-tools +sigs.k8s.io/structured-merge-diff +sigs.k8s.io/testing_frameworks +sigs.k8s.io/yaml diff --git a/executor/licenses/dep_repo.manual.csv b/executor/licenses/dep_repo.manual.csv new file mode 100644 index 0000000000..8fec4fbe7d --- /dev/null +++ b/executor/licenses/dep_repo.manual.csv @@ -0,0 +1,2 @@ +gomodules.xyz/jsonpatch/v2,gomodules/jsonpatch +honnef.co/go/tools,dominikh/go-tools diff --git a/executor/licenses/license.txt b/executor/licenses/license.txt new file mode 100644 index 0000000000..9936b93ce2 --- /dev/null +++ b/executor/licenses/license.txt @@ -0,0 +1,15099 @@ 
+-------------------------------------------------------------------------------- +seldonio/seldon-core Apache License 2.0 https://github.com/SeldonIO/seldon-core/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 Seldon Technologies Ltd. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +GoogleCloudPlatform/gcloud-golang Apache License 2.0 https://github.com/googleapis/google-cloud-go/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +census-ecosystem/opencensus-go-exporter-prometheus Apache License 2.0 https://github.com/census-ecosystem/opencensus-go-exporter-prometheus/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +Azure/azure-sdk-for-go Apache License 2.0 https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +Azure/go-autorest Apache License 2.0 https://github.com/Azure/go-autorest/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +BurntSushi/toml MIT License https://github.com/BurntSushi/toml/blob/master/COPYING +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +NYTimes/gziphandler Apache License 2.0 https://github.com/nytimes/gziphandler/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +PuerkitoBio/purell BSD 3-Clause "New" or "Revised" License https://github.com/PuerkitoBio/purell/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2012, Martin Angers +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +PuerkitoBio/urlesc BSD 3-Clause "New" or "Revised" License https://github.com/PuerkitoBio/urlesc/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +alecthomas/template BSD 3-Clause "New" or "Revised" License https://github.com/alecthomas/template/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +alecthomas/units MIT License https://github.com/alecthomas/units/blob/master/COPYING +-------------------------------------------------------------------------------- +Copyright (C) 2014 Alec Thomas + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +beorn7/perks MIT License https://github.com/beorn7/perks/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +cespare/xxhash MIT License https://github.com/cespare/xxhash/blob/master/LICENSE.txt +-------------------------------------------------------------------------------- +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +client9/misspell MIT License https://github.com/client9/misspell/blob/master/LICENSE +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015-2017 Nick Galbreath + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +cloudevents/sdk-go Apache License 2.0 https://github.com/cloudevents/sdk-go/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +davecgh/go-spew ISC License https://github.com/davecgh/go-spew/blob/master/LICENSE +-------------------------------------------------------------------------------- +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-------------------------------------------------------------------------------- +dgrijalva/jwt-go MIT License https://github.com/dgrijalva/jwt-go/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +docopt/docopt-go MIT License https://github.com/docopt/docopt.go/blob/master/LICENSE +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2013 Keith Batten +Copyright (c) 2016 David Irvine + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +emicklei/go-restful MIT License https://github.com/emicklei/go-restful/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2012,2013 Ernest Micklei + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +evanphx/json-patch BSD 3-Clause "New" or "Revised" License https://github.com/evanphx/json-patch/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +fatih/color MIT License https://github.com/fatih/color/blob/master/LICENSE.md +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- +fortytw2/leaktest BSD 3-Clause "New" or "Revised" License https://github.com/fortytw2/leaktest/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +fsnotify/fsnotify BSD 3-Clause "New" or "Revised" License https://github.com/fsnotify/fsnotify/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +ghodss/yaml MIT License https://github.com/ghodss/yaml/blob/master/LICENSE +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +go-kit/kit MIT License https://github.com/go-kit/kit/blob/master/LICENSE +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +go-logfmt/logfmt MIT License https://github.com/go-logfmt/logfmt/blob/master/LICENSE +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015 go-logfmt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +go-logr/logr Apache License 2.0 https://github.com/go-logr/logr/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +go-logr/zapr Apache License 2.0 https://github.com/go-logr/zapr/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +go-openapi/jsonpointer Apache License 2.0 https://github.com/go-openapi/jsonpointer/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +go-openapi/jsonreference Apache License 2.0 https://github.com/go-openapi/jsonreference/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +go-openapi/spec Apache License 2.0 https://github.com/go-openapi/spec/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +go-openapi/swag Apache License 2.0 https://github.com/go-openapi/swag/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +-------------------------------------------------------------------------------- +go-stack/stack MIT License https://github.com/go-stack/stack/blob/master/LICENSE.md +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Chris Hines + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +-------------------------------------------------------------------------------- +gobuffalo/flect MIT License https://github.com/gobuffalo/flect/blob/master/LICENSE +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +gogo/protobuf BSD 3-Clause "New" or "Revised" License https://github.com/gogo/protobuf/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. 
+https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +golang/glog Apache License 2.0 https://github.com/golang/glog/blob/master/LICENSE +-------------------------------------------------------------------------------- +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +golang/groupcache Apache License 2.0 https://github.com/golang/groupcache/blob/master/LICENSE +-------------------------------------------------------------------------------- +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. 
+ +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +golang/mock Apache License 2.0 https://github.com/golang/mock/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +golang/protobuf BSD 3-Clause "New" or "Revised" License https://github.com/golang/protobuf/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +google/btree Apache License 2.0 https://github.com/google/btree/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +google/go-cmp BSD 3-Clause "New" or "Revised" License https://github.com/google/go-cmp/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +google/gofuzz Apache License 2.0 https://github.com/google/gofuzz/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +google/martian Apache License 2.0 https://github.com/google/martian/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +google/pprof Apache License 2.0 https://github.com/google/pprof/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +google/uuid BSD 3-Clause "New" or "Revised" License https://github.com/google/uuid/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +googleapis/gax-go BSD 3-Clause "New" or "Revised" License https://github.com/googleapis/gax-go/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +googleapis/gnostic Apache License 2.0 https://github.com/googleapis/gnostic/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +gophercloud/gophercloud Apache License 2.0 https://github.com/gophercloud/gophercloud/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright 2012-2013 Rackspace, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. + +------ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + +-------------------------------------------------------------------------------- +gorilla/mux BSD 3-Clause "New" or "Revised" License https://github.com/gorilla/mux/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +grpc-ecosystem/go-grpc-middleware Apache License 2.0 https://github.com/grpc-ecosystem/go-grpc-middleware/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +-------------------------------------------------------------------------------- +grpc-ecosystem/go-grpc-prometheus Apache License 2.0 https://github.com/grpc-ecosystem/go-grpc-prometheus/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +-------------------------------------------------------------------------------- +hashicorp/golang-lru Mozilla Public License 2.0 https://github.com/hashicorp/golang-lru/blob/master/LICENSE +-------------------------------------------------------------------------------- +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
"Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. 
You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
+
+--------------------------------------------------------------------------------
+hpcloud/tail MIT License https://github.com/hpcloud/tail/blob/master/LICENSE.txt
+--------------------------------------------------------------------------------
+# The MIT License (MIT)
+
+# © Copyright 2015 Hewlett Packard Enterprise Development LP
+Copyright (c) 2014 ActiveState
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------------------------------------------------------
+imdario/mergo BSD 3-Clause "New" or "Revised" License https://github.com/imdario/mergo/blob/master/LICENSE
+--------------------------------------------------------------------------------
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +inconshreveable/mousetrap Apache License 2.0 https://github.com/inconshreveable/mousetrap/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright 2014 Alan Shreve + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- +json-iterator/go MIT License https://github.com/json-iterator/go/blob/master/LICENSE +-------------------------------------------------------------------------------- +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +-------------------------------------------------------------------------------- +jstemmer/go-junit-report MIT License https://github.com/jstemmer/go-junit-report/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2012 Joel Stemmer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- +julienschmidt/httprouter BSD 3-Clause "New" or "Revised" License https://github.com/julienschmidt/httprouter/blob/master/LICENSE +-------------------------------------------------------------------------------- +BSD 3-Clause License + +Copyright (c) 2013, Julien Schmidt +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. 
Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +kelseyhightower/envconfig MIT License https://github.com/kelseyhightower/envconfig/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2013 Kelsey Hightower + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +-------------------------------------------------------------------------------- +kisielk/errcheck MIT License https://github.com/kisielk/errcheck/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2013 Kamil Kisiel + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +kisielk/gotool MIT License https://github.com/kisielk/gotool/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2013 Kamil Kisiel + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +konsorten/go-windows-terminal-sequences MIT License https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE +-------------------------------------------------------------------------------- +(The MIT License) + +Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +kr/pretty MIT License https://github.com/kr/pretty/blob/main/License +-------------------------------------------------------------------------------- +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +kr/pty MIT License https://github.com/kr/pty/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2019 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +kr/text MIT License https://github.com/kr/text/blob/main/License +-------------------------------------------------------------------------------- +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +mailru/easyjson MIT License https://github.com/mailru/easyjson/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +mattn/go-colorable MIT License https://github.com/mattn/go-colorable/blob/master/LICENSE +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +-------------------------------------------------------------------------------- +mattn/go-isatty MIT License https://github.com/mattn/go-isatty/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- +matttproud/golang_protobuf_extensions Apache License 2.0 https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +modern-go/concurrent Apache License 2.0 https://github.com/modern-go/concurrent/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +modern-go/reflect2 Apache License 2.0 https://github.com/modern-go/reflect2/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +munnerz/goautoneg BSD 3-Clause "New" or "Revised" License https://github.com/munnerz/goautoneg/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +mwitkow/go-conntrack Apache License 2.0 https://github.com/mwitkow/go-conntrack/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +nats-io/jwt Apache License 2.0 https://github.com/nats-io/jwt/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +nats-io/nats-server Apache License 2.0 https://github.com/nats-io/nats-server/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +nats-io/nats.go Apache License 2.0 https://github.com/nats-io/nats.go/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +nats-io/nkeys Apache License 2.0 https://github.com/nats-io/nkeys/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +nats-io/nuid Apache License 2.0 https://github.com/nats-io/nuid/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +-------------------------------------------------------------------------------- +onsi/ginkgo MIT License https://github.com/onsi/ginkgo/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +onsi/gomega MIT License https://github.com/onsi/gomega/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- +opentracing/opentracing-go Apache License 2.0 https://github.com/opentracing/opentracing-go/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The OpenTracing Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +pborman/uuid BSD 3-Clause "New" or "Revised" License https://github.com/pborman/uuid/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +pkg/errors BSD 2-Clause "Simplified" License https://github.com/pkg/errors/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +pmezard/go-difflib BSD 3-Clause "New" or "Revised" License https://github.com/pmezard/go-difflib/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +prometheus/client_golang Apache License 2.0 https://github.com/prometheus/client_golang/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +prometheus/client_model Apache License 2.0 https://github.com/prometheus/client_model/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +prometheus/common Apache License 2.0 https://github.com/prometheus/common/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +prometheus/procfs Apache License 2.0 https://github.com/prometheus/procfs/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +sirupsen/logrus MIT License https://github.com/sirupsen/logrus/blob/master/LICENSE +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +spf13/afero Apache License 2.0 https://github.com/spf13/afero/blob/master/LICENSE.txt +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +-------------------------------------------------------------------------------- +spf13/cobra Apache License 2.0 https://github.com/spf13/cobra/blob/master/LICENSE.txt +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +-------------------------------------------------------------------------------- +spf13/pflag BSD 3-Clause "New" or "Revised" License https://github.com/spf13/pflag/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +stretchr/objx MIT License https://github.com/stretchr/objx/blob/master/LICENSE +-------------------------------------------------------------------------------- +The MIT License + +Copyright (c) 2014 Stretchr, Inc. +Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +-------------------------------------------------------------------------------- +stretchr/testify MIT License https://github.com/stretchr/testify/blob/master/LICENSE +-------------------------------------------------------------------------------- +MIT License + +Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +tensorflow/tensorflow Apache License 2.0 https://github.com/tensorflow/tensorflow/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright 2019 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +uber/jaeger-client-go Apache License 2.0 https://github.com/jaegertracing/jaeger-client-go/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +uber/jaeger-lib Apache License 2.0 https://github.com/jaegertracing/jaeger-lib/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +census-instrumentation/opencensus-go Apache License 2.0 https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +-------------------------------------------------------------------------------- +uber-go/atomic MIT License https://github.com/uber-go/atomic/blob/master/LICENSE.txt +-------------------------------------------------------------------------------- +Copyright (c) 2016 Uber Technologies, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------------------- +uber-go/multierr MIT License https://github.com/uber-go/multierr/blob/master/LICENSE.txt +-------------------------------------------------------------------------------- +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------------------- +uber-go/zap MIT License https://github.com/uber-go/zap/blob/master/LICENSE.txt +-------------------------------------------------------------------------------- +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- +golang/crypto BSD 3-Clause "New" or "Revised" License https://github.com/golang/crypto/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +golang/exp BSD 3-Clause "New" or "Revised" License https://github.com/golang/exp/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +golang/lint BSD 3-Clause "New" or "Revised" License https://github.com/golang/lint/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +golang/net BSD 3-Clause "New" or "Revised" License https://github.com/golang/net/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +golang/oauth2 BSD 3-Clause "New" or "Revised" License https://github.com/golang/oauth2/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +golang/sync BSD 3-Clause "New" or "Revised" License https://github.com/golang/sync/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +golang/sys BSD 3-Clause "New" or "Revised" License https://github.com/golang/sys/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +golang/text BSD 3-Clause "New" or "Revised" License https://github.com/golang/text/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +golang/time BSD 3-Clause "New" or "Revised" License https://github.com/golang/time/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +golang/tools BSD 3-Clause "New" or "Revised" License https://github.com/golang/tools/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +golang/xerrors BSD 3-Clause "New" or "Revised" License https://github.com/golang/xerrors/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- +gomodules/jsonpatch Apache License 2.0 https://github.com/gomodules/jsonpatch/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +google/google-api-go-client BSD 3-Clause "New" or "Revised" License https://github.com/googleapis/google-api-go-client/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +golang/appengine Apache License 2.0 https://github.com/golang/appengine/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +google/go-genproto Apache License 2.0 https://github.com/googleapis/go-genproto/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +grpc/grpc-go Apache License 2.0 https://github.com/grpc/grpc-go/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +-------------------------------------------------------------------------------- +alecthomas/kingpin MIT License https://github.com/alecthomas/kingpin/blob/master/COPYING +-------------------------------------------------------------------------------- +Copyright (C) 2014 Alec Thomas + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +go-check/check BSD 2-Clause "Simplified" License https://github.com/go-check/check/blob/v1/LICENSE +-------------------------------------------------------------------------------- +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+go-inf/inf BSD 3-Clause "New" or "Revised" License https://github.com/go-inf/inf/blob/master/LICENSE
+--------------------------------------------------------------------------------
+Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
+Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +go-tomb/tomb BSD 3-Clause "New" or "Revised" License https://github.com/go-tomb/tomb/blob/v1/LICENSE +-------------------------------------------------------------------------------- +tomb - support for clean goroutine termination in Go. + +Copyright (c) 2010-2011 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +go-yaml/yaml Apache License 2.0 https://github.com/go-yaml/yaml/blob/v2/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +gotestyourself/gotest.tools Apache License 2.0 https://github.com/gotestyourself/gotest.tools/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright 2018 gotest.tools authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +-------------------------------------------------------------------------------- +dominikh/go-tools MIT License https://github.com/dominikh/go-tools/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2016 Dominik Honnef + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- +kubernetes/api Apache License 2.0 https://github.com/kubernetes/api/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +kubernetes/apiextensions-apiserver Apache License 2.0 https://github.com/kubernetes/apiextensions-apiserver/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +kubernetes/apimachinery Apache License 2.0 https://github.com/kubernetes/apimachinery/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +kubernetes/client-go Apache License 2.0 https://github.com/kubernetes/client-go/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +kubernetes/gengo Apache License 2.0 https://github.com/kubernetes/gengo/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +kubernetes/klog Apache License 2.0 https://github.com/kubernetes/klog/blob/master/LICENSE +-------------------------------------------------------------------------------- +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. 
+ +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +kubernetes/kube-openapi Apache License 2.0 https://github.com/kubernetes/kube-openapi/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +kubernetes/utils Apache License 2.0 https://github.com/kubernetes/utils/blob/master/LICENSE +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +knative/pkg Apache License 2.0 https://github.com/knative/pkg/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +-------------------------------------------------------------------------------- +vcabbage/amqp MIT License https://github.com/vcabbage/amqp/blob/master/LICENSE +-------------------------------------------------------------------------------- +MIT License + +Copyright (C) 2017 Kale Blankenship + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +rsc/binaryregexp BSD 3-Clause "New" or "Revised" License https://github.com/rsc/binaryregexp/blob/master/LICENSE +-------------------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +kubernetes-sigs/controller-runtime Apache License 2.0 https://github.com/kubernetes-sigs/controller-runtime/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +kubernetes-sigs/controller-tools Apache License 2.0 https://github.com/kubernetes-sigs/controller-tools/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +kubernetes-sigs/structured-merge-diff Apache License 2.0 https://github.com/kubernetes-sigs/structured-merge-diff/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +kubernetes-sigs/testing_frameworks Apache License 2.0 https://github.com/kubernetes-retired/testing_frameworks/blob/master/LICENSE +-------------------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 The Kubernetes Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +-------------------------------------------------------------------------------- +kubernetes-sigs/yaml MIT License https://github.com/kubernetes-sigs/yaml/blob/master/LICENSE +-------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/executor/licenses/license_info.csv b/executor/licenses/license_info.csv new file mode 100644 index 0000000000..2e93e58744 --- /dev/null +++ b/executor/licenses/license_info.csv @@ -0,0 +1,140 @@ +seldonio/seldon-core,https://github.com/SeldonIO/seldon-core/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/SeldonIO/seldon-core/master/LICENSE +GoogleCloudPlatform/gcloud-golang,https://github.com/googleapis/google-cloud-go/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/googleapis/google-cloud-go/master/LICENSE +census-ecosystem/opencensus-go-exporter-prometheus,https://github.com/census-ecosystem/opencensus-go-exporter-prometheus/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/census-ecosystem/opencensus-go-exporter-prometheus/master/LICENSE +Azure/azure-sdk-for-go,https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/Azure/azure-sdk-for-go/master/LICENSE 
+Azure/go-autorest,https://github.com/Azure/go-autorest/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/Azure/go-autorest/master/LICENSE +BurntSushi/toml,https://github.com/BurntSushi/toml/blob/master/COPYING,MIT License,https://raw.githubusercontent.com/BurntSushi/toml/master/COPYING +NYTimes/gziphandler,https://github.com/nytimes/gziphandler/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/nytimes/gziphandler/master/LICENSE +PuerkitoBio/purell,https://github.com/PuerkitoBio/purell/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/PuerkitoBio/purell/master/LICENSE +PuerkitoBio/urlesc,https://github.com/PuerkitoBio/urlesc/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/PuerkitoBio/urlesc/master/LICENSE +alecthomas/template,https://github.com/alecthomas/template/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/alecthomas/template/master/LICENSE +alecthomas/units,https://github.com/alecthomas/units/blob/master/COPYING,MIT License,https://raw.githubusercontent.com/alecthomas/units/master/COPYING +beorn7/perks,https://github.com/beorn7/perks/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/beorn7/perks/master/LICENSE +cespare/xxhash,https://github.com/cespare/xxhash/blob/master/LICENSE.txt,MIT License,https://raw.githubusercontent.com/cespare/xxhash/master/LICENSE.txt +client9/misspell,https://github.com/client9/misspell/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/client9/misspell/master/LICENSE +cloudevents/sdk-go,https://github.com/cloudevents/sdk-go/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/cloudevents/sdk-go/master/LICENSE +davecgh/go-spew,https://github.com/davecgh/go-spew/blob/master/LICENSE,ISC License,https://raw.githubusercontent.com/davecgh/go-spew/master/LICENSE 
+dgrijalva/jwt-go,https://github.com/dgrijalva/jwt-go/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/dgrijalva/jwt-go/master/LICENSE +docopt/docopt-go,https://github.com/docopt/docopt.go/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/docopt/docopt.go/master/LICENSE +emicklei/go-restful,https://github.com/emicklei/go-restful/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/emicklei/go-restful/master/LICENSE +evanphx/json-patch,https://github.com/evanphx/json-patch/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/evanphx/json-patch/master/LICENSE +fatih/color,https://github.com/fatih/color/blob/master/LICENSE.md,MIT License,https://raw.githubusercontent.com/fatih/color/master/LICENSE.md +fortytw2/leaktest,https://github.com/fortytw2/leaktest/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/fortytw2/leaktest/master/LICENSE +fsnotify/fsnotify,https://github.com/fsnotify/fsnotify/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/fsnotify/fsnotify/master/LICENSE +ghodss/yaml,https://github.com/ghodss/yaml/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/ghodss/yaml/master/LICENSE +go-kit/kit,https://github.com/go-kit/kit/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/go-kit/kit/master/LICENSE +go-logfmt/logfmt,https://github.com/go-logfmt/logfmt/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/go-logfmt/logfmt/master/LICENSE +go-logr/logr,https://github.com/go-logr/logr/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/go-logr/logr/master/LICENSE +go-logr/zapr,https://github.com/go-logr/zapr/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/go-logr/zapr/master/LICENSE +go-openapi/jsonpointer,https://github.com/go-openapi/jsonpointer/blob/master/LICENSE,Apache License 
2.0,https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE +go-openapi/jsonreference,https://github.com/go-openapi/jsonreference/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE +go-openapi/spec,https://github.com/go-openapi/spec/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE +go-openapi/swag,https://github.com/go-openapi/swag/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE +go-stack/stack,https://github.com/go-stack/stack/blob/master/LICENSE.md,MIT License,https://raw.githubusercontent.com/go-stack/stack/master/LICENSE.md +gobuffalo/flect,https://github.com/gobuffalo/flect/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/gobuffalo/flect/master/LICENSE +gogo/protobuf,https://github.com/gogo/protobuf/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/gogo/protobuf/master/LICENSE +golang/glog,https://github.com/golang/glog/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/golang/glog/master/LICENSE +golang/groupcache,https://github.com/golang/groupcache/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/golang/groupcache/master/LICENSE +golang/mock,https://github.com/golang/mock/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/golang/mock/master/LICENSE +golang/protobuf,https://github.com/golang/protobuf/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/protobuf/master/LICENSE +google/btree,https://github.com/google/btree/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/google/btree/master/LICENSE +google/go-cmp,https://github.com/google/go-cmp/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/google/go-cmp/master/LICENSE 
+google/gofuzz,https://github.com/google/gofuzz/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/google/gofuzz/master/LICENSE +google/martian,https://github.com/google/martian/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/google/martian/master/LICENSE +google/pprof,https://github.com/google/pprof/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/google/pprof/master/LICENSE +google/uuid,https://github.com/google/uuid/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/google/uuid/master/LICENSE +googleapis/gax-go,https://github.com/googleapis/gax-go/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/googleapis/gax-go/master/LICENSE +googleapis/gnostic,https://github.com/googleapis/gnostic/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/googleapis/gnostic/master/LICENSE +gophercloud/gophercloud,https://github.com/gophercloud/gophercloud/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/gophercloud/gophercloud/master/LICENSE +gorilla/mux,https://github.com/gorilla/mux/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/gorilla/mux/master/LICENSE +grpc-ecosystem/go-grpc-middleware,https://github.com/grpc-ecosystem/go-grpc-middleware/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/grpc-ecosystem/go-grpc-middleware/master/LICENSE +grpc-ecosystem/go-grpc-prometheus,https://github.com/grpc-ecosystem/go-grpc-prometheus/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/grpc-ecosystem/go-grpc-prometheus/master/LICENSE +hashicorp/golang-lru,https://github.com/hashicorp/golang-lru/blob/master/LICENSE,Mozilla Public License 2.0,https://raw.githubusercontent.com/hashicorp/golang-lru/master/LICENSE +hpcloud/tail,https://github.com/hpcloud/tail/blob/master/LICENSE.txt,MIT 
License,https://raw.githubusercontent.com/hpcloud/tail/master/LICENSE.txt +imdario/mergo,https://github.com/imdario/mergo/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/imdario/mergo/master/LICENSE +inconshreveable/mousetrap,https://github.com/inconshreveable/mousetrap/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/inconshreveable/mousetrap/master/LICENSE +json-iterator/go,https://github.com/json-iterator/go/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/json-iterator/go/master/LICENSE +jstemmer/go-junit-report,https://github.com/jstemmer/go-junit-report/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/jstemmer/go-junit-report/master/LICENSE +julienschmidt/httprouter,https://github.com/julienschmidt/httprouter/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/julienschmidt/httprouter/master/LICENSE +kelseyhightower/envconfig,https://github.com/kelseyhightower/envconfig/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/kelseyhightower/envconfig/master/LICENSE +kisielk/errcheck,https://github.com/kisielk/errcheck/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/kisielk/errcheck/master/LICENSE +kisielk/gotool,https://github.com/kisielk/gotool/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/kisielk/gotool/master/LICENSE +konsorten/go-windows-terminal-sequences,https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/konsorten/go-windows-terminal-sequences/master/LICENSE +kr/pretty,https://github.com/kr/pretty/blob/main/License,MIT License,https://raw.githubusercontent.com/kr/pretty/main/License +kr/pty,https://github.com/kr/pty/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/kr/pty/master/LICENSE +kr/text,https://github.com/kr/text/blob/main/License,MIT 
License,https://raw.githubusercontent.com/kr/text/main/License +mailru/easyjson,https://github.com/mailru/easyjson/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/mailru/easyjson/master/LICENSE +mattn/go-colorable,https://github.com/mattn/go-colorable/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/mattn/go-colorable/master/LICENSE +mattn/go-isatty,https://github.com/mattn/go-isatty/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/mattn/go-isatty/master/LICENSE +matttproud/golang_protobuf_extensions,https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/matttproud/golang_protobuf_extensions/master/LICENSE +modern-go/concurrent,https://github.com/modern-go/concurrent/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE +modern-go/reflect2,https://github.com/modern-go/reflect2/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/modern-go/reflect2/master/LICENSE +munnerz/goautoneg,https://github.com/munnerz/goautoneg/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/munnerz/goautoneg/master/LICENSE +mwitkow/go-conntrack,https://github.com/mwitkow/go-conntrack/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/mwitkow/go-conntrack/master/LICENSE +nats-io/jwt,https://github.com/nats-io/jwt/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/nats-io/jwt/master/LICENSE +nats-io/nats-server,https://github.com/nats-io/nats-server/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/nats-io/nats-server/master/LICENSE +nats-io/nats.go,https://github.com/nats-io/nats.go/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/nats-io/nats.go/master/LICENSE +nats-io/nkeys,https://github.com/nats-io/nkeys/blob/master/LICENSE,Apache License 
2.0,https://raw.githubusercontent.com/nats-io/nkeys/master/LICENSE +nats-io/nuid,https://github.com/nats-io/nuid/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/nats-io/nuid/master/LICENSE +onsi/ginkgo,https://github.com/onsi/ginkgo/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/onsi/ginkgo/master/LICENSE +onsi/gomega,https://github.com/onsi/gomega/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/onsi/gomega/master/LICENSE +opentracing/opentracing-go,https://github.com/opentracing/opentracing-go/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/opentracing/opentracing-go/master/LICENSE +pborman/uuid,https://github.com/pborman/uuid/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/pborman/uuid/master/LICENSE +pkg/errors,https://github.com/pkg/errors/blob/master/LICENSE,BSD 2-Clause "Simplified" License,https://raw.githubusercontent.com/pkg/errors/master/LICENSE +pmezard/go-difflib,https://github.com/pmezard/go-difflib/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/pmezard/go-difflib/master/LICENSE +prometheus/client_golang,https://github.com/prometheus/client_golang/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/prometheus/client_golang/master/LICENSE +prometheus/client_model,https://github.com/prometheus/client_model/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/prometheus/client_model/master/LICENSE +prometheus/common,https://github.com/prometheus/common/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/prometheus/common/master/LICENSE +prometheus/procfs,https://github.com/prometheus/procfs/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/prometheus/procfs/master/LICENSE +sirupsen/logrus,https://github.com/sirupsen/logrus/blob/master/LICENSE,MIT 
License,https://raw.githubusercontent.com/sirupsen/logrus/master/LICENSE +spf13/afero,https://github.com/spf13/afero/blob/master/LICENSE.txt,Apache License 2.0,https://raw.githubusercontent.com/spf13/afero/master/LICENSE.txt +spf13/cobra,https://github.com/spf13/cobra/blob/master/LICENSE.txt,Apache License 2.0,https://raw.githubusercontent.com/spf13/cobra/master/LICENSE.txt +spf13/pflag,https://github.com/spf13/pflag/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/spf13/pflag/master/LICENSE +stretchr/objx,https://github.com/stretchr/objx/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/stretchr/objx/master/LICENSE +stretchr/testify,https://github.com/stretchr/testify/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/stretchr/testify/master/LICENSE +tensorflow/tensorflow,https://github.com/tensorflow/tensorflow/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/tensorflow/tensorflow/master/LICENSE +uber/jaeger-client-go,https://github.com/jaegertracing/jaeger-client-go/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/jaegertracing/jaeger-client-go/master/LICENSE +uber/jaeger-lib,https://github.com/jaegertracing/jaeger-lib/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/jaegertracing/jaeger-lib/master/LICENSE +census-instrumentation/opencensus-go,https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/census-instrumentation/opencensus-go/master/LICENSE +uber-go/atomic,https://github.com/uber-go/atomic/blob/master/LICENSE.txt,MIT License,https://raw.githubusercontent.com/uber-go/atomic/master/LICENSE.txt +uber-go/multierr,https://github.com/uber-go/multierr/blob/master/LICENSE.txt,MIT License,https://raw.githubusercontent.com/uber-go/multierr/master/LICENSE.txt +uber-go/zap,https://github.com/uber-go/zap/blob/master/LICENSE.txt,MIT 
License,https://raw.githubusercontent.com/uber-go/zap/master/LICENSE.txt +golang/crypto,https://github.com/golang/crypto/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/crypto/master/LICENSE +golang/exp,https://github.com/golang/exp/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/exp/master/LICENSE +golang/lint,https://github.com/golang/lint/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/lint/master/LICENSE +golang/net,https://github.com/golang/net/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/net/master/LICENSE +golang/oauth2,https://github.com/golang/oauth2/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/oauth2/master/LICENSE +golang/sync,https://github.com/golang/sync/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/sync/master/LICENSE +golang/sys,https://github.com/golang/sys/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/sys/master/LICENSE +golang/text,https://github.com/golang/text/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/text/master/LICENSE +golang/time,https://github.com/golang/time/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/time/master/LICENSE +golang/tools,https://github.com/golang/tools/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/tools/master/LICENSE +golang/xerrors,https://github.com/golang/xerrors/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/golang/xerrors/master/LICENSE +gomodules/jsonpatch,https://github.com/gomodules/jsonpatch/blob/master/LICENSE,Apache License 
2.0,https://raw.githubusercontent.com/gomodules/jsonpatch/master/LICENSE +google/google-api-go-client,https://github.com/googleapis/google-api-go-client/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/googleapis/google-api-go-client/master/LICENSE +golang/appengine,https://github.com/golang/appengine/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/golang/appengine/master/LICENSE +google/go-genproto,https://github.com/googleapis/go-genproto/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/googleapis/go-genproto/master/LICENSE +grpc/grpc-go,https://github.com/grpc/grpc-go/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/grpc/grpc-go/master/LICENSE +alecthomas/kingpin,https://github.com/alecthomas/kingpin/blob/master/COPYING,MIT License,https://raw.githubusercontent.com/alecthomas/kingpin/master/COPYING +go-check/check,https://github.com/go-check/check/blob/v1/LICENSE,BSD 2-Clause "Simplified" License,https://raw.githubusercontent.com/go-check/check/v1/LICENSE +go-inf/inf,https://github.com/go-inf/inf/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/go-inf/inf/master/LICENSE +go-tomb/tomb,https://github.com/go-tomb/tomb/blob/v1/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/go-tomb/tomb/v1/LICENSE +go-yaml/yaml,https://github.com/go-yaml/yaml/blob/v2/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/go-yaml/yaml/v2/LICENSE +gotestyourself/gotest.tools,https://github.com/gotestyourself/gotest.tools/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/gotestyourself/gotest.tools/master/LICENSE +dominikh/go-tools,https://github.com/dominikh/go-tools/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/dominikh/go-tools/master/LICENSE +kubernetes/api,https://github.com/kubernetes/api/blob/master/LICENSE,Apache License 
2.0,https://raw.githubusercontent.com/kubernetes/api/master/LICENSE +kubernetes/apiextensions-apiserver,https://github.com/kubernetes/apiextensions-apiserver/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes/apiextensions-apiserver/master/LICENSE +kubernetes/apimachinery,https://github.com/kubernetes/apimachinery/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes/apimachinery/master/LICENSE +kubernetes/client-go,https://github.com/kubernetes/client-go/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes/client-go/master/LICENSE +kubernetes/gengo,https://github.com/kubernetes/gengo/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes/gengo/master/LICENSE +kubernetes/klog,https://github.com/kubernetes/klog/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes/klog/master/LICENSE +kubernetes/kube-openapi,https://github.com/kubernetes/kube-openapi/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes/kube-openapi/master/LICENSE +kubernetes/utils,https://github.com/kubernetes/utils/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes/utils/master/LICENSE +knative/pkg,https://github.com/knative/pkg/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/knative/pkg/master/LICENSE +vcabbage/amqp,https://github.com/vcabbage/amqp/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/vcabbage/amqp/master/LICENSE +rsc/binaryregexp,https://github.com/rsc/binaryregexp/blob/master/LICENSE,BSD 3-Clause "New" or "Revised" License,https://raw.githubusercontent.com/rsc/binaryregexp/master/LICENSE +kubernetes-sigs/controller-runtime,https://github.com/kubernetes-sigs/controller-runtime/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/master/LICENSE 
+kubernetes-sigs/controller-tools,https://github.com/kubernetes-sigs/controller-tools/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/LICENSE +kubernetes-sigs/structured-merge-diff,https://github.com/kubernetes-sigs/structured-merge-diff/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes-sigs/structured-merge-diff/master/LICENSE +kubernetes-sigs/testing_frameworks,https://github.com/kubernetes-retired/testing_frameworks/blob/master/LICENSE,Apache License 2.0,https://raw.githubusercontent.com/kubernetes-retired/testing_frameworks/master/LICENSE +kubernetes-sigs/yaml,https://github.com/kubernetes-sigs/yaml/blob/master/LICENSE,MIT License,https://raw.githubusercontent.com/kubernetes-sigs/yaml/master/LICENSE diff --git a/executor/licenses/repo.txt b/executor/licenses/repo.txt new file mode 100644 index 0000000000..502d361ab0 --- /dev/null +++ b/executor/licenses/repo.txt @@ -0,0 +1,141 @@ +seldonio/seldon-core +GoogleCloudPlatform/gcloud-golang +census-ecosystem/opencensus-go-exporter-prometheus +Azure/azure-sdk-for-go +Azure/go-autorest +BurntSushi/toml +NYTimes/gziphandler +PuerkitoBio/purell +PuerkitoBio/urlesc +alecthomas/template +alecthomas/units +beorn7/perks +cespare/xxhash +client9/misspell +cloudevents/sdk-go +davecgh/go-spew +dgrijalva/jwt-go +docopt/docopt-go +emicklei/go-restful +evanphx/json-patch +fatih/color +fortytw2/leaktest +fsnotify/fsnotify +ghodss/yaml +go-kit/kit +go-logfmt/logfmt +go-logr/logr +go-logr/zapr +go-openapi/jsonpointer +go-openapi/jsonreference +go-openapi/spec +go-openapi/swag +go-stack/stack +gobuffalo/flect +gogo/protobuf +golang/glog +golang/groupcache +golang/mock +golang/protobuf +google/btree +google/go-cmp +google/gofuzz +google/martian +google/pprof +google/uuid +googleapis/gax-go +googleapis/gnostic +gophercloud/gophercloud +gorilla/mux +grpc-ecosystem/go-grpc-middleware +grpc-ecosystem/go-grpc-prometheus +hashicorp/golang-lru 
+hpcloud/tail +imdario/mergo +inconshreveable/mousetrap +json-iterator/go +jstemmer/go-junit-report +julienschmidt/httprouter +kelseyhightower/envconfig +kisielk/errcheck +kisielk/gotool +konsorten/go-windows-terminal-sequences +kr/logfmt +kr/pretty +kr/pty +kr/text +mailru/easyjson +mattn/go-colorable +mattn/go-isatty +matttproud/golang_protobuf_extensions +modern-go/concurrent +modern-go/reflect2 +munnerz/goautoneg +mwitkow/go-conntrack +nats-io/jwt +nats-io/nats-server +nats-io/nats.go +nats-io/nkeys +nats-io/nuid +onsi/ginkgo +onsi/gomega +opentracing/opentracing-go +pborman/uuid +pkg/errors +pmezard/go-difflib +prometheus/client_golang +prometheus/client_model +prometheus/common +prometheus/procfs +sirupsen/logrus +spf13/afero +spf13/cobra +spf13/pflag +stretchr/objx +stretchr/testify +tensorflow/tensorflow +uber/jaeger-client-go +uber/jaeger-lib +census-instrumentation/opencensus-go +uber-go/atomic +uber-go/multierr +uber-go/zap +golang/crypto +golang/exp +golang/lint +golang/net +golang/oauth2 +golang/sync +golang/sys +golang/text +golang/time +golang/tools +golang/xerrors +gomodules/jsonpatch +google/google-api-go-client +golang/appengine +google/go-genproto +grpc/grpc-go +alecthomas/kingpin +go-check/check +go-inf/inf +go-tomb/tomb +go-yaml/yaml +gotestyourself/gotest.tools +dominikh/go-tools +kubernetes/api +kubernetes/apiextensions-apiserver +kubernetes/apimachinery +kubernetes/client-go +kubernetes/gengo +kubernetes/klog +kubernetes/kube-openapi +kubernetes/utils +knative/pkg +vcabbage/amqp +rsc/binaryregexp +kubernetes-sigs/controller-runtime +kubernetes-sigs/controller-tools +kubernetes-sigs/structured-merge-diff +kubernetes-sigs/testing_frameworks +kubernetes-sigs/yaml diff --git a/executor/logger/collector.go b/executor/logger/collector.go new file mode 100644 index 0000000000..27f01d4f26 --- /dev/null +++ b/executor/logger/collector.go @@ -0,0 +1,12 @@ +package logger + +import "fmt" + +// A buffered channel that we can send work requests on. 
+var WorkQueue = make(chan LogRequest, LoggerWorkerQueueSize) + +func QueueLogRequest(req LogRequest) error { + WorkQueue <- req + fmt.Println("Work request queued") + return nil +} diff --git a/executor/logger/constants.go b/executor/logger/constants.go new file mode 100644 index 0000000000..ea637b58f6 --- /dev/null +++ b/executor/logger/constants.go @@ -0,0 +1,12 @@ +package logger + +const ( + LoggerWorkerQueueSize = 100 + CloudEventsIdHeader = "Ce-Id" + CloudEventsTypeHeader = "Ce-type" + CloudEventsTypeSource = "Ce-source" +) + +func GetLoggerDefaultUrl(namespace string) string { + return "http://default-broker." + namespace +} diff --git a/executor/logger/dispatcher.go b/executor/logger/dispatcher.go new file mode 100644 index 0000000000..2cdeded52f --- /dev/null +++ b/executor/logger/dispatcher.go @@ -0,0 +1,32 @@ +package logger + +import ( + "github.com/go-logr/logr" +) + +var WorkerQueue chan chan LogRequest + +func StartDispatcher(nworkers int, log logr.Logger) { + // First, initialize the channel we are going to put the workers' work channels into. + WorkerQueue = make(chan chan LogRequest, nworkers) + + // Now, create all of our workers. 
+ for i := 0; i < nworkers; i++ { + log.Info("Starting", "worker", i+1) + worker := NewWorker(i+1, WorkerQueue, log) + worker.Start() + } + + go func() { + for { + select { + case work := <-WorkQueue: + go func() { + worker := <-WorkerQueue + + worker <- work + }() + } + } + }() +} diff --git a/executor/logger/types.go b/executor/logger/types.go new file mode 100644 index 0000000000..0f7a8cea36 --- /dev/null +++ b/executor/logger/types.go @@ -0,0 +1,23 @@ +package logger + +import ( + "net/url" +) + +type LogRequestType string + +const ( + InferenceRequest LogRequestType = "Request" + InferenceResponse LogRequestType = "Response" +) + +type LogRequest struct { + Url *url.URL + Bytes *[]byte + ContentType string + ReqType LogRequestType + Id string + SourceUri *url.URL + ModelId string + RequestId string +} diff --git a/executor/logger/worker.go b/executor/logger/worker.go new file mode 100644 index 0000000000..ff52e44f0c --- /dev/null +++ b/executor/logger/worker.go @@ -0,0 +1,119 @@ +package logger + +import ( + "context" + "fmt" + "github.com/cloudevents/sdk-go" + "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" + "github.com/go-logr/logr" + "github.com/seldonio/seldon-core/executor/api/payload" + "net/http" + "time" +) + +const ( + CEInferenceRequest = "io.seldon.serving.inference.request" + CEInferenceResponse = "io.seldon.serving.inference.response" + ModelIdHeader = "Model-ID" +) + +// NewWorker creates, and returns a new Worker object. Its only argument +// is a channel that the worker can add itself to whenever it is done its +// work. +func NewWorker(id int, workerQueue chan chan LogRequest, log logr.Logger) Worker { + // Create, and return the worker. 
+ return Worker{ + Log: log, + ID: id, + Work: make(chan LogRequest), + WorkerQueue: workerQueue, + QuitChan: make(chan bool), + Client: http.Client{ + Timeout: 60 * time.Second, + }, + CeCtx: cloudevents.ContextWithEncoding(context.Background(), cloudevents.Binary), + } +} + +type Worker struct { + Log logr.Logger + ID int + Work chan LogRequest + WorkerQueue chan chan LogRequest + QuitChan chan bool + Client http.Client + CeCtx context.Context + CeTransport transport.Transport +} + +func (W *Worker) sendCloudEvent(logReq LogRequest) error { + + t, err := cloudevents.NewHTTPTransport( + cloudevents.WithTarget(logReq.Url.String()), + cloudevents.WithEncoding(cloudevents.HTTPBinaryV1), + cloudevents.WitHHeader(ModelIdHeader, logReq.ModelId), + cloudevents.WitHHeader(payload.SeldonPUIDHeader, logReq.RequestId), //FIXME add all meta data + ) + + if err != nil { + return fmt.Errorf("while creating http transport: %s", err) + } + c, err := cloudevents.NewClient(t, + cloudevents.WithTimeNow(), + ) + if err != nil { + return fmt.Errorf("while creating new cloudevents client: %s", err) + } + event := cloudevents.NewEvent(cloudevents.VersionV1) + event.SetID(logReq.Id) + if logReq.ReqType == InferenceRequest { + event.SetType(CEInferenceRequest) + } else { + event.SetType(CEInferenceResponse) + } + event.SetSource(logReq.SourceUri.String()) + event.SetDataContentType(logReq.ContentType) + if err := event.SetData(*logReq.Bytes); err != nil { + return fmt.Errorf("while setting cloudevents data: %s", err) + } + + if _, _, err := c.Send(W.CeCtx, event); err != nil { + return fmt.Errorf("while sending event: %s", err) + } + return nil +} + +// This function "starts" the worker by starting a goroutine, that is +// an infinite "for-select" loop. +func (w *Worker) Start() { + go func() { + for { + // Add ourselves into the worker queue. + w.WorkerQueue <- w.Work + + select { + case work := <-w.Work: + // Receive a work request. 
+ fmt.Printf("worker%d: Received work request for %s\n", w.ID, work.Url.String()) + + if err := w.sendCloudEvent(work); err != nil { + w.Log.Error(err, "Failed to send log", "URL", work.Url.String()) + } + + case <-w.QuitChan: + // We have been asked to stop. + fmt.Printf("worker %d stopping\n", w.ID) + return + } + } + }() +} + +// Stop tells the worker to stop listening for work requests. +// +// Note that the worker will only stop *after* it has finished its work. +func (w *Worker) Stop() { + go func() { + w.QuitChan <- true + }() +} diff --git a/executor/main.go b/executor/main.go new file mode 100644 index 0000000000..b619891b89 --- /dev/null +++ b/executor/main.go @@ -0,0 +1,269 @@ +package main + +import ( + "context" + "encoding/base64" + "encoding/json" + "flag" + "fmt" + "github.com/ghodss/yaml" + "github.com/go-logr/logr" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/common/log" + "github.com/seldonio/seldon-core/executor/api" + seldonclient "github.com/seldonio/seldon-core/executor/api/client" + "github.com/seldonio/seldon-core/executor/api/grpc" + "github.com/seldonio/seldon-core/executor/api/grpc/seldon" + "github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto" + "github.com/seldonio/seldon-core/executor/api/grpc/tensorflow" + "github.com/seldonio/seldon-core/executor/api/rest" + loghandler "github.com/seldonio/seldon-core/executor/logger" + "github.com/seldonio/seldon-core/executor/proto/tensorflow/serving" + "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + jaegercfg "github.com/uber/jaeger-client-go/config" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "os/signal" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + "strings" + "syscall" + "time" +) + +var ( + configPath = flag.String("config", "", "Path to kubconfig") + sdepName = flag.String("sdep", "", "Seldon deployment name") + namespace = flag.String("namespace", "", "Namespace") + predictorName = flag.String("predictor", 
"", "Name of the predictor inside the SeldonDeployment") + httpPort = flag.Int("http_port", 8080, "Executor port") + grpcPort = flag.Int("grpc_port", 8000, "Executor port") + wait = flag.Duration("graceful_timeout", time.Second*15, "Graceful shutdown secs") + protocol = flag.String("protocol", "seldon", "The payload protocol") + transport = flag.String("transport", "rest", "The network transport http or grpc") + filename = flag.String("file", "", "Load graph from file") + hostname = flag.String("hostname", "localhost", "The hostname of the running server") + logWorkers = flag.Int("logger_workers", 5, "Number of workers handling payload logging") + prometheusPath = flag.String("prometheus_path", "/metrics", "The prometheus metrics path") +) + +func getPredictorFromEnv() (*v1.PredictorSpec, error) { + b64Predictor := os.Getenv("ENGINE_PREDICTOR") + if b64Predictor != "" { + bytes, err := base64.StdEncoding.DecodeString(b64Predictor) + if err != nil { + return nil, err + } + predictor := v1.PredictorSpec{} + if err := json.Unmarshal(bytes, &predictor); err != nil { + return nil, err + } else { + return &predictor, nil + } + } else { + return nil, nil + } +} + +func getPredictorFromFile(predictorName string, filename string) (*v1.PredictorSpec, error) { + dat, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + if strings.HasSuffix(filename, "yaml") { + var sdep v1.SeldonDeployment + err = yaml.Unmarshal(dat, &sdep) + if err != nil { + return nil, err + } + for _, predictor := range sdep.Spec.Predictors { + if predictor.Name == predictorName { + return &predictor, nil + } + } + return nil, fmt.Errorf("Predictor not found %s", predictorName) + } else { + return nil, fmt.Errorf("Unsupported file type %s", filename) + } +} + +func getServerUrl(hostname string, port int) (*url.URL, error) { + return url.Parse(fmt.Sprintf("http://%s:%d/", hostname, port)) +} + +func runHttpServer(logger logr.Logger, predictor *v1.PredictorSpec, client 
seldonclient.SeldonApiClient, port int, + probesOnly bool, serverUrl *url.URL, namespace string, protocol string, deploymentName string, prometheusPath string) { + + // Create REST API + seldonRest := rest.NewServerRestApi(predictor, client, probesOnly, serverUrl, namespace, protocol, deploymentName, prometheusPath) + seldonRest.Initialise() + + address := fmt.Sprintf("0.0.0.0:%d", port) + logger.Info("Listening", "Address", address) + + srv := &http.Server{ + Handler: seldonRest.Router, + Addr: address, + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + go func() { + if err := srv.ListenAndServe(); err != nil { + logger.Error(err, "Server error") + } + }() + + c := make(chan os.Signal, 1) + // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) and SIGTERM + // SIGKILL, SIGQUIT will not be caught. + signal.Notify(c, syscall.SIGINT) + signal.Notify(c, syscall.SIGTERM) + + // Block until we receive our signal. + <-c + + // Create a deadline to wait for. + ctx, cancel := context.WithTimeout(context.Background(), *wait) + defer cancel() + // Doesn't block if no connections, but will otherwise wait + // until the timeout deadline. + srv.Shutdown(ctx) + // Optionally, you could run srv.Shutdown in a goroutine and block on + // <-ctx.Done() if your application should wait for other services + // to finalize based on context cancellation. 
+ logger.Info("shutting down") + os.Exit(0) + +} + +func runGrpcServer(logger logr.Logger, predictor *v1.PredictorSpec, client seldonclient.SeldonApiClient, port int, serverUrl *url.URL, namespace string, protocol string, deploymentName string) { + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + grpcServer := grpc.CreateGrpcServer(predictor, deploymentName) + if protocol == api.ProtocolSeldon { + seldonGrpcServer := seldon.NewGrpcSeldonServer(predictor, client, serverUrl, namespace) + proto.RegisterSeldonServer(grpcServer, seldonGrpcServer) + } else { + tensorflowGrpcServer := tensorflow.NewGrpcTensorflowServer(predictor, client, serverUrl, namespace) + serving.RegisterPredictionServiceServer(grpcServer, tensorflowGrpcServer) + serving.RegisterModelServiceServer(grpcServer, tensorflowGrpcServer) + } + err = grpcServer.Serve(lis) + if err != nil { + log.Errorf("Grpc server error: %v", err) + } +} + +func initTracing() io.Closer { + //Initialise tracing + cfg, err := jaegercfg.FromEnv() + if err != nil { + // parsing errors might happen here, such as when we get a string where we expect a number + log.Fatal("Could not parse Jaeger env vars", err.Error()) + } + + if cfg.ServiceName == "" { + cfg.ServiceName = "executor" + } + + tracer, closer, err := cfg.NewTracer() + if err != nil { + log.Fatal("Could not initialize jaeger tracer:", err.Error()) + } + + opentracing.SetGlobalTracer(tracer) + + return closer +} + +func main() { + flag.Parse() + + if *sdepName == "" { + log.Fatal("Seldon deployment name must be provided") + } + + if *namespace == "" { + log.Fatal("Namespace must be provied") + } + + if *predictorName == "" { + log.Fatal("Predictor must be provied") + } + + if !(*protocol == api.ProtocolSeldon || *protocol == api.ProtocolTensorflow) { + log.Fatal("Protocol must be seldon or tensorflow") + } + + if !(*transport == "rest" || *transport == "grpc") { + log.Fatal("Only rest and grpc 
supported") + } + + serverUrl, err := getServerUrl(*hostname, *httpPort) + if err != nil { + log.Fatal("Failed to create server url from", *hostname, *httpPort) + } + + logf.SetLogger(logf.ZapLogger(false)) + logger := logf.Log.WithName("entrypoint") + + logger.Info("Flags", "transport", *transport) + + var predictor *v1.PredictorSpec + if *filename != "" { + logger.Info("Trying to get predictor from file") + predictor, err = getPredictorFromFile(*predictorName, *filename) + if err != nil { + logger.Error(err, "Failed to get predictor from file") + panic(err) + } + } else { + logger.Info("Trying to get predictor from Env") + predictor, err = getPredictorFromEnv() + if err != nil { + logger.Error(err, "Failed to get predictor from Env") + panic(err) + } else if predictor == nil { + logger.Info("Trying to get predictor from API") + seldonDeploymentClient := seldonclient.NewSeldonDeploymentClient(configPath) + predictor, err = seldonDeploymentClient.GetPredictor(*sdepName, *namespace, *predictorName) + if err != nil { + logger.Error(err, "Failed to find predictor", "name", predictor) + panic(err) + } + } + } + + //Start Logger Dispacther + loghandler.StartDispatcher(*logWorkers, logger) + + //Init Tracing + closer := initTracing() + defer closer.Close() + + if *transport == "rest" { + clientRest := rest.NewJSONRestClient(*protocol, *sdepName, predictor) + logger.Info("Running http server ", "port", *httpPort) + runHttpServer(logger, predictor, clientRest, *httpPort, false, serverUrl, *namespace, *protocol, *sdepName, *prometheusPath) + } else { + logger.Info("Running http probes only server ", "port", *httpPort) + go runHttpServer(logger, predictor, nil, *httpPort, true, serverUrl, *namespace, *protocol, *sdepName, *prometheusPath) + logger.Info("Running grpc server ", "port", *grpcPort) + var clientGrpc seldonclient.SeldonApiClient + if *protocol == "seldon" { + clientGrpc = seldon.NewSeldonGrpcClient(predictor, *sdepName) + } else { + clientGrpc = 
tensorflow.NewTensorflowGrpcClient(predictor, *sdepName) + } + runGrpcServer(logger, predictor, clientGrpc, *grpcPort, serverUrl, *namespace, *protocol, *sdepName) + + } + +} diff --git a/executor/predictor/components.go b/executor/predictor/components.go new file mode 100644 index 0000000000..5882377537 --- /dev/null +++ b/executor/predictor/components.go @@ -0,0 +1,24 @@ +package predictor + +import ( + "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "math/rand" + "strconv" +) + +func (p *PredictorProcess) abTestRouter(node *v1.PredictiveUnit) (int, error) { + ratioA := 0.5 + var err error + if len(node.Parameters) == 1 && node.Parameters[0].Name == "ratioA" { + ratioA, err = strconv.ParseFloat(node.Parameters[0].Value, 32) + if err != nil { + return 0, err + } + } + + if rand.Float64() < ratioA { + return 0, nil + } else { + return 1, nil + } +} diff --git a/executor/predictor/predictor_process.go b/executor/predictor/predictor_process.go new file mode 100644 index 0000000000..8909efb616 --- /dev/null +++ b/executor/predictor/predictor_process.go @@ -0,0 +1,311 @@ +package predictor + +import ( + "context" + "fmt" + "github.com/go-logr/logr" + guuid "github.com/google/uuid" + "github.com/seldonio/seldon-core/executor/api/client" + "github.com/seldonio/seldon-core/executor/api/payload" + payloadLogger "github.com/seldonio/seldon-core/executor/logger" + "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "net/url" + "sync" +) + +type PredictorProcess struct { + Ctx context.Context + Client client.SeldonApiClient + Log logr.Logger + ServerUrl *url.URL + Namespace string + Meta *payload.MetaData +} + +func NewPredictorProcess(context context.Context, client client.SeldonApiClient, log logr.Logger, serverUrl *url.URL, namespace string, meta map[string][]string) PredictorProcess { + return PredictorProcess{ + Ctx: context, + Client: client, + Log: log, + ServerUrl: serverUrl, + Namespace: namespace, + Meta: 
payload.NewFromMap(meta), + } +} + +func hasMethod(method v1.PredictiveUnitMethod, methods *[]v1.PredictiveUnitMethod) bool { + if methods != nil { + for _, m := range *methods { + if m == method { + return true + } + } + } + return false +} + +func (p *PredictorProcess) transformInput(node *v1.PredictiveUnit, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + callModel := false + callTransformInput := false + if (*node).Type != nil { + switch *node.Type { + case v1.MODEL: + callModel = true + case v1.TRANSFORMER: + callTransformInput = true + } + } + if hasMethod(v1.TRANSFORM_INPUT, node.Methods) { + callTransformInput = true + } + if callModel { + msg, err := p.Client.Chain(p.Ctx, node.Name, msg) + if err != nil { + return nil, err + } + return p.Client.Predict(p.Ctx, node.Name, node.Endpoint.ServiceHost, node.Endpoint.ServicePort, msg, p.Meta.Meta) + } else if callTransformInput { + msg, err := p.Client.Chain(p.Ctx, node.Name, msg) + if err != nil { + return nil, err + } + return p.Client.TransformInput(p.Ctx, node.Name, node.Endpoint.ServiceHost, node.Endpoint.ServicePort, msg, p.Meta.Meta) + } else { + return msg, nil + } + +} + +func (p *PredictorProcess) transformOutput(node *v1.PredictiveUnit, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + callClient := false + if (*node).Type != nil { + switch *node.Type { + case v1.OUTPUT_TRANSFORMER: + callClient = true + } + } + if hasMethod(v1.TRANSFORM_OUTPUT, node.Methods) { + callClient = true + } + + if callClient { + msg, err := p.Client.Chain(p.Ctx, node.Name, msg) + if err != nil { + return nil, err + } + return p.Client.TransformOutput(p.Ctx, node.Name, node.Endpoint.ServiceHost, node.Endpoint.ServicePort, msg, p.Meta.Meta) + } else { + return msg, nil + } + +} + +func (p *PredictorProcess) feedback(node *v1.PredictiveUnit, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + callClient := false + if (*node).Type != nil { + switch *node.Type { + case v1.MODEL: + callClient = 
true + } + } + if hasMethod(v1.SEND_FEEDBACK, node.Methods) { + callClient = true + } + + if callClient { + return p.Client.Feedback(p.Ctx, node.Name, node.Endpoint.ServiceHost, node.Endpoint.ServicePort, msg, p.Meta.Meta) + } else { + return msg, nil + } + +} + +func (p *PredictorProcess) route(node *v1.PredictiveUnit, msg payload.SeldonPayload) (int, error) { + callClient := false + if (*node).Type != nil { + switch *node.Type { + case v1.ROUTER: + callClient = true + } + } + if hasMethod(v1.ROUTE, node.Methods) { + callClient = true + } + if callClient { + return p.Client.Route(p.Ctx, node.Name, node.Endpoint.ServiceHost, node.Endpoint.ServicePort, msg, p.Meta.Meta) + } else if node.Implementation != nil && *node.Implementation == v1.RANDOM_ABTEST { + return p.abTestRouter(node) + } else { + return -1, nil + } +} + +func (p *PredictorProcess) aggregate(node *v1.PredictiveUnit, msg []payload.SeldonPayload) (payload.SeldonPayload, error) { + callClient := false + if (*node).Type != nil { + switch *node.Type { + case v1.COMBINER: + callClient = true + } + } + if hasMethod(v1.AGGREGATE, node.Methods) { + callClient = true + } + + if callClient { + return p.Client.Combine(p.Ctx, node.Name, node.Endpoint.ServiceHost, node.Endpoint.ServicePort, msg, p.Meta.Meta) + } else { + return msg[0], nil + } + +} + +func (p *PredictorProcess) predictChildren(node *v1.PredictiveUnit, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + if node.Children != nil && len(node.Children) > 0 { + route, err := p.route(node, msg) + if err != nil { + return nil, err + } + var cmsgs []payload.SeldonPayload + if route == -1 { + cmsgs = make([]payload.SeldonPayload, len(node.Children)) + var errs = make([]error, len(node.Children)) + wg := sync.WaitGroup{} + for i, nodeChild := range node.Children { + wg.Add(1) + go func(i int, nodeChild v1.PredictiveUnit, msg payload.SeldonPayload) { + cmsgs[i], errs[i] = p.Predict(&nodeChild, msg) + wg.Done() + }(i, nodeChild, msg) + } + wg.Wait() + 
for i, err := range errs { + if err != nil { + return cmsgs[i], err + } + } + } else { + cmsgs = make([]payload.SeldonPayload, 1) + cmsgs[0], err = p.Predict(&node.Children[route], msg) + if err != nil { + return cmsgs[0], err + } + } + return p.aggregate(node, cmsgs) + } else { + return msg, nil + } +} + +func (p *PredictorProcess) feedbackChildren(node *v1.PredictiveUnit, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + if node.Children != nil && len(node.Children) > 0 { + route, err := p.route(node, msg) + if err != nil { + return nil, err + } + var cmsgs []payload.SeldonPayload + if route == -1 { + cmsgs = make([]payload.SeldonPayload, len(node.Children)) + var errs = make([]error, len(node.Children)) + wg := sync.WaitGroup{} + for i, nodeChild := range node.Children { + wg.Add(1) + go func(i int, nodeChild v1.PredictiveUnit, msg payload.SeldonPayload) { + cmsgs[i], errs[i] = p.Feedback(&nodeChild, msg) + wg.Done() + }(i, nodeChild, msg) + } + wg.Wait() + for i, err := range errs { + if err != nil { + return cmsgs[i], err + } + } + } else { + cmsgs = make([]payload.SeldonPayload, 1) + cmsgs[0], err = p.Feedback(&node.Children[route], msg) + if err != nil { + return cmsgs[0], err + } + } + return p.aggregate(node, cmsgs) + } else { + return msg, nil + } +} + +func (p *PredictorProcess) getLogUrl(logger *v1.Logger) (*url.URL, error) { + if logger.Url != nil { + return url.Parse(*logger.Url) + } else { + return url.Parse(payloadLogger.GetLoggerDefaultUrl(p.Namespace)) + } +} + +func (p *PredictorProcess) logPayload(nodeName string, logger *v1.Logger, reqType payloadLogger.LogRequestType, msg payload.SeldonPayload) error { + data, err := msg.GetBytes() + if err != nil { + return err + } + logUrl, err := p.getLogUrl(logger) + if err != nil { + return err + } + + payloadLogger.QueueLogRequest(payloadLogger.LogRequest{ + Url: logUrl, + Bytes: &data, + ContentType: msg.GetContentType(), + ReqType: reqType, + Id: guuid.New().String(), + SourceUri: 
p.ServerUrl, + ModelId: nodeName, + RequestId: p.Ctx.Value(payload.SeldonPUIDHeader).(string), + }) + return nil +} + +func (p *PredictorProcess) Predict(node *v1.PredictiveUnit, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + //Log Request + if node.Logger != nil && (node.Logger.Mode == v1.LogRequest || node.Logger.Mode == v1.LogAll) { + p.logPayload(node.Name, node.Logger, payloadLogger.InferenceRequest, msg) + } + tmsg, err := p.transformInput(node, msg) + if err != nil { + return tmsg, err + } + cmsg, err := p.predictChildren(node, tmsg) + if err != nil { + return tmsg, err + } + response, err := p.transformOutput(node, cmsg) + // Log Response + if err == nil && node.Logger != nil && (node.Logger.Mode == v1.LogResponse || node.Logger.Mode == v1.LogAll) { + p.logPayload(node.Name, node.Logger, payloadLogger.InferenceResponse, response) + } + return response, err +} + +func (p *PredictorProcess) Status(node *v1.PredictiveUnit, modelName string, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + if nodeModel := v1.GetPredictiveUnit(node, modelName); nodeModel == nil { + return nil, fmt.Errorf("Failed to find model %s", modelName) + } else { + return p.Client.Status(p.Ctx, modelName, nodeModel.Endpoint.ServiceHost, nodeModel.Endpoint.ServicePort, msg, p.Meta.Meta) + } +} + +func (p *PredictorProcess) Metadata(node *v1.PredictiveUnit, modelName string, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + if nodeModel := v1.GetPredictiveUnit(node, modelName); nodeModel == nil { + return nil, fmt.Errorf("Failed to find model %s", modelName) + } else { + return p.Client.Metadata(p.Ctx, modelName, nodeModel.Endpoint.ServiceHost, nodeModel.Endpoint.ServicePort, msg, p.Meta.Meta) + } +} + +func (p *PredictorProcess) Feedback(node *v1.PredictiveUnit, msg payload.SeldonPayload) (payload.SeldonPayload, error) { + tmsg, err := p.feedbackChildren(node, msg) + if err != nil { + return tmsg, err + } + return p.feedback(node, msg) +} diff --git 
a/executor/predictor/predictor_process_test.go b/executor/predictor/predictor_process_test.go new file mode 100644 index 0000000000..bb18c5e151 --- /dev/null +++ b/executor/predictor/predictor_process_test.go @@ -0,0 +1,446 @@ +package predictor + +import ( + "context" + "errors" + "github.com/golang/protobuf/jsonpb" + . "github.com/onsi/gomega" + "github.com/seldonio/seldon-core/executor/api/grpc" + "github.com/seldonio/seldon-core/executor/api/grpc/seldon/proto" + "github.com/seldonio/seldon-core/executor/api/payload" + "github.com/seldonio/seldon-core/executor/api/test" + "github.com/seldonio/seldon-core/executor/logger" + v1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "net/http" + "net/http/httptest" + "net/url" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + "testing" +) + +const ( + testSourceUrl = "http://localhost" + testSeldonPuid = "1" + testCustomMetaKey = "key" + testCustomMetaValue = "foo" +) + +func createPredictorProcess(t *testing.T) *PredictorProcess { + url, _ := url.Parse(testSourceUrl) + ctx := context.WithValue(context.TODO(), payload.SeldonPUIDHeader, testSeldonPuid) + pp := NewPredictorProcess(ctx, test.NewSeldonMessageTestClient(t, -1, nil, nil), logf.Log.WithName("SeldonMessageRestClient"), url, "default", map[string][]string{testCustomMetaKey: []string{testCustomMetaValue}}) + return &pp +} + +func createPredictorProcessWithRoute(t *testing.T, chosenRoute int) *PredictorProcess { + url, _ := url.Parse(testSourceUrl) + ctx := context.WithValue(context.TODO(), payload.SeldonPUIDHeader, testSeldonPuid) + pp := NewPredictorProcess(ctx, test.NewSeldonMessageTestClient(t, chosenRoute, nil, nil), logf.Log.WithName("SeldonMessageRestClient"), url, "default", map[string][]string{}) + return &pp +} + +func createPredictorProcessWithError(t *testing.T, errMethod *v1.PredictiveUnitMethod, err error) *PredictorProcess { + url, _ := url.Parse(testSourceUrl) + ctx := context.WithValue(context.TODO(), 
payload.SeldonPUIDHeader, testSeldonPuid) + pp := NewPredictorProcess(ctx, test.NewSeldonMessageTestClient(t, -1, errMethod, err), logf.Log.WithName("SeldonMessageRestClient"), url, "default", map[string][]string{}) + return &pp +} + +func createPredictPayload(g *GomegaWithT) payload.SeldonPayload { + var sm proto.SeldonMessage + var data = ` {"data":{"ndarray":[1.1,2.0]}}` + err := jsonpb.UnmarshalString(data, &sm) + g.Expect(err).Should(BeNil()) + return &payload.ProtoPayload{Msg: &sm} +} + +func createFeedbackPayload(g *GomegaWithT) payload.SeldonPayload { + var sm proto.Feedback + var data = ` {"request":{"data":{"ndarray":[1.1,2.0]}}}` + err := jsonpb.UnmarshalString(data, &sm) + g.Expect(err).Should(BeNil()) + return &payload.ProtoPayload{Msg: &sm} +} + +func TestModel(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + model := v1.MODEL + graph := &v1.PredictiveUnit{ + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + } + + pResp, err := createPredictorProcess(t).Predict(graph, createPredictPayload(g)) + g.Expect(err).Should(BeNil()) + smRes := pResp.GetPayload().(*proto.SeldonMessage) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) +} + +func TestStatus(t *testing.T) { + t.Logf("Started") + modelName := "mymodel" + g := NewGomegaWithT(t) + model := v1.MODEL + graph := &v1.PredictiveUnit{ + Name: modelName, + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + } + + pResp, err := createPredictorProcess(t).Status(graph, modelName, nil) + g.Expect(err).Should(BeNil()) + smRes := string(pResp.GetPayload().([]byte)) + g.Expect(smRes).To(Equal(test.TestClientStatusResponse)) + +} + +func TestMetadata(t *testing.T) { + t.Logf("Started") + modelName := "mymodel" + g := NewGomegaWithT(t) + model := v1.MODEL + graph := 
&v1.PredictiveUnit{ + Name: modelName, + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + } + + pResp, err := createPredictorProcess(t).Metadata(graph, modelName, nil) + g.Expect(err).Should(BeNil()) + smRes := string(pResp.GetPayload().([]byte)) + g.Expect(smRes).To(Equal(test.TestClientMetadataResponse)) + +} + +func TestTwoLevelModel(t *testing.T) { + g := NewGomegaWithT(t) + model := v1.MODEL + graph := &v1.PredictiveUnit{ + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + Children: []v1.PredictiveUnit{ + { + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo2", + ServicePort: 9001, + Type: v1.REST, + }, + }, + }, + } + + pResp, err := createPredictorProcess(t).Predict(graph, createPredictPayload(g)) + g.Expect(err).Should(BeNil()) + smRes := pResp.GetPayload().(*proto.SeldonMessage) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) +} + +func TestCombiner(t *testing.T) { + g := NewGomegaWithT(t) + model := v1.MODEL + combiner := v1.COMBINER + graph := &v1.PredictiveUnit{ + Type: &combiner, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + Children: []v1.PredictiveUnit{ + { + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo2", + ServicePort: 9001, + Type: v1.REST, + }, + }, + { + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo3", + ServicePort: 9002, + Type: v1.REST, + }, + }, + }, + } + + pResp, err := createPredictorProcess(t).Predict(graph, createPredictPayload(g)) + g.Expect(err).Should(BeNil()) + smRes := pResp.GetPayload().(*proto.SeldonMessage) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) +} + +func TestMethods(t 
*testing.T) { + g := NewGomegaWithT(t) + //model := v1.UNKNOWN_TYPE + graph := &v1.PredictiveUnit{ + Methods: &[]v1.PredictiveUnitMethod{v1.TRANSFORM_INPUT, v1.TRANSFORM_OUTPUT, v1.ROUTE, v1.AGGREGATE}, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + Children: []v1.PredictiveUnit{ + { + Methods: &[]v1.PredictiveUnitMethod{v1.TRANSFORM_INPUT, v1.TRANSFORM_OUTPUT}, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo2", + ServicePort: 9001, + Type: v1.REST, + }, + }, + }, + } + + pResp, err := createPredictorProcess(t).Predict(graph, createPredictPayload(g)) + g.Expect(err).Should(BeNil()) + smRes := pResp.GetPayload().(*proto.SeldonMessage) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) +} + +func TestFeedback(t *testing.T) { + g := NewGomegaWithT(t) + //model := v1.UNKNOWN_TYPE + graph := &v1.PredictiveUnit{ + Methods: &[]v1.PredictiveUnitMethod{v1.SEND_FEEDBACK}, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + Children: []v1.PredictiveUnit{ + { + Methods: &[]v1.PredictiveUnitMethod{v1.SEND_FEEDBACK}, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo2", + ServicePort: 9001, + Type: v1.REST, + }, + }, + }, + } + + pResp, err := createPredictorProcess(t).Feedback(graph, createFeedbackPayload(g)) + g.Expect(err).Should(BeNil()) + smRes := pResp.GetPayload().(*proto.SeldonMessage) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) +} + +func TestRouter(t *testing.T) { + g := NewGomegaWithT(t) + model := v1.MODEL + router := v1.ROUTER + graph := &v1.PredictiveUnit{ + Type: &router, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + Children: []v1.PredictiveUnit{ + { + Type: &model, + Endpoint: 
&v1.Endpoint{ + ServiceHost: "foo2", + ServicePort: 9001, + Type: v1.REST, + }, + }, + { + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo3", + ServicePort: 9002, + Type: v1.REST, + }, + }, + }, + } + + pResp, err := createPredictorProcess(t).Predict(graph, createPredictPayload(g)) + g.Expect(err).Should(BeNil()) + smRes := pResp.GetPayload().(*proto.SeldonMessage) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) + + pResp, err = createPredictorProcessWithRoute(t, 0).Predict(graph, createPredictPayload(g)) + g.Expect(err).Should(BeNil()) + smRes = pResp.GetPayload().(*proto.SeldonMessage) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) + + pResp, err = createPredictorProcessWithRoute(t, 1).Predict(graph, createPredictPayload(g)) + g.Expect(err).Should(BeNil()) + smRes = pResp.GetPayload().(*proto.SeldonMessage) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) +} + +func TestModelError(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + model := v1.MODEL + graph := &v1.PredictiveUnit{ + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + } + + errMethod := v1.TRANSFORM_INPUT + chosenErr := errors.New("something bad happened") + pResp, err := createPredictorProcessWithError(t, &errMethod, chosenErr).Predict(graph, createPredictPayload(g)) + g.Expect(err).ShouldNot(BeNil()) + g.Expect(pResp).Should(BeNil()) + g.Expect(err.Error()).Should(Equal("something bad happened")) +} + +func TestABTest(t *testing.T) { + g := NewGomegaWithT(t) + model := v1.MODEL + abtest := v1.RANDOM_ABTEST + graph := &v1.PredictiveUnit{ + 
Implementation: &abtest, + Children: []v1.PredictiveUnit{ + { + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo2", + ServicePort: 9001, + Type: v1.REST, + }, + }, + { + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo3", + ServicePort: 9002, + Type: v1.REST, + }, + }, + }, + } + + pResp, err := createPredictorProcess(t).Predict(graph, createPredictPayload(g)) + g.Expect(err).Should(BeNil()) + smRes := pResp.GetPayload().(*proto.SeldonMessage) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) +} + +func TestModelWithLogRequests(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + modelName := "foo" + logged := false + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + //g.Expect(r.Header.Get(logger.CloudEventsIdHeader)).Should(Equal(testEventId)) + g.Expect(r.Header.Get(logger.CloudEventsTypeHeader)).To(Equal(logger.CEInferenceRequest)) + g.Expect(r.Header.Get(logger.CloudEventsTypeSource)).To(Equal(testSourceUrl)) + g.Expect(r.Header.Get(logger.ModelIdHeader)).To(Equal(modelName)) + g.Expect(r.Header.Get("Content-Type")).To(Equal(grpc.ProtobufContentType)) + g.Expect(r.Header.Get(payload.SeldonPUIDHeader)).To(Equal(testSeldonPuid)) + w.Write([]byte("")) + logged = true + }) + server := httptest.NewServer(handler) + defer server.Close() + + logf.SetLogger(logf.ZapLogger(false)) + log := logf.Log.WithName("entrypoint") + logger.StartDispatcher(1, log) + + model := v1.MODEL + graph := &v1.PredictiveUnit{ + Name: modelName, + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + Logger: &v1.Logger{ + Mode: v1.LogRequest, + Url: &server.URL, + }, + } + + pResp, err := createPredictorProcess(t).Predict(graph, createPredictPayload(g)) + g.Expect(err).Should(BeNil()) + smRes := pResp.GetPayload().(*proto.SeldonMessage) + 
g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) + g.Eventually(func() bool { return logged }).Should(Equal(true)) +} + +func TestModelWithLogResponses(t *testing.T) { + t.Logf("Started") + g := NewGomegaWithT(t) + modelName := "foo" + logged := false + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + //g.Expect(r.Header.Get(logger.CloudEventsIdHeader)).Should(Equal(testEventId)) + g.Expect(r.Header.Get(logger.CloudEventsTypeHeader)).To(Equal(logger.CEInferenceResponse)) + g.Expect(r.Header.Get(logger.CloudEventsTypeSource)).To(Equal(testSourceUrl)) + g.Expect(r.Header.Get(logger.ModelIdHeader)).To(Equal(modelName)) + g.Expect(r.Header.Get("Content-Type")).To(Equal(grpc.ProtobufContentType)) + g.Expect(r.Header.Get(payload.SeldonPUIDHeader)).To(Equal(testSeldonPuid)) + w.Write([]byte("")) + logged = true + }) + server := httptest.NewServer(handler) + defer server.Close() + + logf.SetLogger(logf.ZapLogger(false)) + log := logf.Log.WithName("entrypoint") + logger.StartDispatcher(1, log) + + model := v1.MODEL + graph := &v1.PredictiveUnit{ + Name: modelName, + Type: &model, + Endpoint: &v1.Endpoint{ + ServiceHost: "foo", + ServicePort: 9000, + Type: v1.REST, + }, + Logger: &v1.Logger{ + Mode: v1.LogResponse, + Url: &server.URL, + }, + } + + pResp, err := createPredictorProcess(t).Predict(graph, createPredictPayload(g)) + g.Expect(err).Should(BeNil()) + smRes := pResp.GetPayload().(*proto.SeldonMessage) + g.Expect(smRes.GetData().GetNdarray().Values[0].GetNumberValue()).Should(Equal(1.1)) + g.Expect(smRes.GetData().GetNdarray().Values[1].GetNumberValue()).Should(Equal(2.0)) + g.Eventually(func() bool { return logged }).Should(Equal(true)) +} diff --git a/executor/predictor/ready_checker.go b/executor/predictor/ready_checker.go new file mode 100644 index 0000000000..85455df494 --- /dev/null +++ 
b/executor/predictor/ready_checker.go @@ -0,0 +1,27 @@ +package predictor + +import ( + "fmt" + "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "net" +) + +func Ready(node *v1.PredictiveUnit) error { + for _, child := range node.Children { + err := Ready(&child) + if err != nil { + return err + } + } + if node.Endpoint != nil && node.Endpoint.ServiceHost != "" && node.Endpoint.ServicePort > 0 { + c, err := net.Dial("tcp", fmt.Sprintf("%s:%d", node.Endpoint.ServiceHost, node.Endpoint.ServicePort)) + if err != nil { + return err + } else { + err = c.Close() + return nil + } + } else { + return nil + } +} diff --git a/executor/proto/model_service.proto b/executor/proto/model_service.proto new file mode 100644 index 0000000000..29a3b07751 --- /dev/null +++ b/executor/proto/model_service.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +import "tensorflow_serving/apis/get_model_status.proto"; +import "tensorflow_serving/apis/model_management.proto"; + +package tensorflow.serving; + +// ModelService provides methods to query and update the state of the server, +// e.g. which models/versions are being served. +service ModelService { + // Gets status of model. If the ModelSpec in the request does not specify + // version, information about all versions of the model will be returned. If + // the ModelSpec in the request does specify a version, the status of only + // that version will be returned. + rpc GetModelStatus(GetModelStatusRequest) returns (GetModelStatusResponse); + + // Reloads the set of served models. The new config supersedes the old one, + // so if a model is omitted from the new config it will be unloaded and no + // longer served. 
+ rpc HandleReloadConfigRequest(ReloadConfigRequest) + returns (ReloadConfigResponse); +} diff --git a/executor/proto/prediction.proto b/executor/proto/prediction.proto new file mode 100644 index 0000000000..5a7536cc7b --- /dev/null +++ b/executor/proto/prediction.proto @@ -0,0 +1,130 @@ +syntax = "proto3"; + +import "google/protobuf/struct.proto"; +import "tensorflow/core/framework/tensor.proto"; + +package seldon.protos; + +option java_package = "io.seldon.protos"; +option java_outer_classname = "PredictionProtos"; +option go_package = "github.com/seldonio/seldon-core/examples/wrappers/go/pkg/api"; + +// [START Messages] + +message SeldonMessage { + + Status status = 1; + Meta meta = 2; + oneof data_oneof { + DefaultData data = 3; + bytes binData = 4; + string strData = 5; + google.protobuf.Value jsonData = 6; + } +} + +message DefaultData { + repeated string names = 1; + oneof data_oneof { + Tensor tensor = 2; + google.protobuf.ListValue ndarray = 3; + tensorflow.TensorProto tftensor = 4; + } +} + +message Tensor { + repeated int32 shape = 1 [packed=true]; + repeated double values = 2 [packed=true]; +} + +message Meta { + string puid = 1; + map tags = 2; + map routing = 3; + map requestPath = 4; + repeated Metric metrics = 5; +} + +message Metric { + enum MetricType { + COUNTER = 0; + GAUGE = 1; + TIMER = 2; + } + string key = 1; + MetricType type = 2; + float value = 3; + map tags = 4; +} + +message SeldonMessageList { + repeated SeldonMessage seldonMessages = 1; +} + +message Status { + + enum StatusFlag { + SUCCESS = 0; + FAILURE = 1; + } + + int32 code = 1; + string info = 2; + string reason = 3; + StatusFlag status = 4; +} + +message Feedback { + SeldonMessage request = 1; + SeldonMessage response = 2; + float reward = 3; + SeldonMessage truth = 4; +} + +message RequestResponse { + SeldonMessage request = 1; + SeldonMessage response = 2; +} + +// [END Messages] + + +// [START Services] + +service Generic { + rpc TransformInput(SeldonMessage) returns 
(SeldonMessage) {}; + rpc TransformOutput(SeldonMessage) returns (SeldonMessage) {}; + rpc Route(SeldonMessage) returns (SeldonMessage) {}; + rpc Aggregate(SeldonMessageList) returns (SeldonMessage) {}; + rpc SendFeedback(Feedback) returns (SeldonMessage) {}; +} + +service Model { + rpc Predict(SeldonMessage) returns (SeldonMessage) {}; + rpc SendFeedback(Feedback) returns (SeldonMessage) {}; + } + +service Router { + rpc Route(SeldonMessage) returns (SeldonMessage) {}; + rpc SendFeedback(Feedback) returns (SeldonMessage) {}; + } + +service Transformer { + rpc TransformInput(SeldonMessage) returns (SeldonMessage) {}; +} + +service OutputTransformer { + rpc TransformOutput(SeldonMessage) returns (SeldonMessage) {}; +} + +service Combiner { + rpc Aggregate(SeldonMessageList) returns (SeldonMessage) {}; +} + + +service Seldon { + rpc Predict(SeldonMessage) returns (SeldonMessage) {}; + rpc SendFeedback(Feedback) returns (SeldonMessage) {}; + } + +// [END Services] diff --git a/executor/proto/prediction_service.proto b/executor/proto/prediction_service.proto new file mode 100644 index 0000000000..44e655417f --- /dev/null +++ b/executor/proto/prediction_service.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +import "tensorflow_serving/apis/classification.proto"; +import "tensorflow_serving/apis/get_model_metadata.proto"; +import "tensorflow_serving/apis/inference.proto"; +import "tensorflow_serving/apis/predict.proto"; +import "tensorflow_serving/apis/regression.proto"; + +// open source marker; do not remove +// PredictionService provides access to machine-learned models loaded by +// model_servers. +service PredictionService { + // Classify. + rpc Classify(ClassificationRequest) returns (ClassificationResponse); + + // Regress. + rpc Regress(RegressionRequest) returns (RegressionResponse); + + // Predict -- provides access to loaded TensorFlow model. 
+ rpc Predict(PredictRequest) returns (PredictResponse); + + // MultiInference API for multi-headed models. + rpc MultiInference(MultiInferenceRequest) returns (MultiInferenceResponse); + + // GetModelMetadata - provides access to metadata for loaded models. + rpc GetModelMetadata(GetModelMetadataRequest) + returns (GetModelMetadataResponse); +} diff --git a/executor/proto/tensorflow/compiler/jit/xla_activity.proto b/executor/proto/tensorflow/compiler/jit/xla_activity.proto new file mode 100644 index 0000000000..50bfb297fa --- /dev/null +++ b/executor/proto/tensorflow/compiler/jit/xla_activity.proto @@ -0,0 +1,120 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/core/protobuf/config.proto"; + +// Summarizes the results of auto-clustering a TensorFlow graph. +// +// Next ID: 5 +message XlaAutoClusteringSummary { + // Represents a single element in a histogram of ops ("op" as in "TensorFlow + // operation"). + // + // Next ID: 3 + message OpAndCount { + // The TensorFlow operation (like MatMult, Add etc.) + string op = 1; + + // The number of times this occurs. + int32 count = 2; + } + + // Describes a single XLA cluster. + // + // Next ID: 4 + message Cluster { + string name = 1; + + // The number of nodes in the cluster. 
+ int32 size = 2; + + // A histogram of the TF operations in this cluster. + repeated OpAndCount op_histogram = 3; + }; + + // The number of nodes in the graph that are not inside an XLA cluster. + int32 unclustered_node_count = 1; + + // The number of nodes in the graph that are in an XLA cluster. + int32 clustered_node_count = 2; + + // All of the XLA clusters in the TF graph. + repeated Cluster clusters = 3; + + // A histogram of the TF operations that were not clustered. + repeated OpAndCount unclustered_op_histogram = 4; +} + +// Listeners listening for auto clustering events get messages of this type. +// +// Next ID: 4 +message XlaAutoClusteringActivity { + // The value of GlobalJitLevel, as determined by `GetGlobalJitLevelForGraph`. + // This determines if global auto-clustering is enabled. + OptimizerOptions.GlobalJitLevel global_jit_level = 1; + + // Whether --tf_xla_cpu_global_jit is enabled in TF_XLA_FLAGS. + bool cpu_global_jit_enabled = 2; + + XlaAutoClusteringSummary summary = 3; +} + +// Listeners listening for JIT compilation events get messages of this type. +// Each instance of XlaJitCompilationActivity corresponds to a single +// compilation of a single XLA cluster. E.g. if a graph has two clusters, A and +// B, and A is compiled 5 times and B is compiled 2 times then we will generate +// 7 instances of XlaJitCompilationActivity. +// +// Next ID: 5 +message XlaJitCompilationActivity { + string cluster_name = 1; + + // The number of time this cluster has been compiled. + int32 compile_count = 2; + + // Microseconds spent in the individual compilation being reported. + int64 compile_time_us = 3; + + // Total microseconds spent in (re-)compiling this cluster so far. + int64 cumulative_compile_time_us = 4; +} + +// LINT.IfChange +// +// Used for logging situations seen in Tensorflow models being optimized that +// are known to not perform well with XLA. 
+// +// Next ID: 3 +message XlaOptimizationRemark { + // Next ID: 6 + enum Warning { + NONE = 0; + INACCURATE_OPERATION = 1; + SLOW_OPERATION = 2; + UNIMPLEMENTED_OPERATION = 3; + SLOW_IMAGE_RESIZE_DIMENSIONS = 4; + MEGAMORPHIC_FUNCTION = 5; + } + + Warning warning = 1; + + // Information such as which node was the problem. + string debug_information = 2; +} +// LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/compiler/jit/xla_activity_listener.h) diff --git a/executor/proto/tensorflow/compiler/tf2tensorrt/utils/trt_engine_instance.proto b/executor/proto/tensorflow/compiler/tf2tensorrt/utils/trt_engine_instance.proto new file mode 100644 index 0000000000..e839497447 --- /dev/null +++ b/executor/proto/tensorflow/compiler/tf2tensorrt/utils/trt_engine_instance.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package tensorflow.tensorrt; + +import "tensorflow/core/framework/tensor_shape.proto"; + +// Containing information for a serialized TensorRT engine. +message TRTEngineInstance { + // The input shapes of the TRT engine. + repeated TensorShapeProto input_shapes = 1; + + // The serialized TRT engine. + // + // TODO(laigd): consider using a more efficient in-memory representation + // instead of string which is the default here. + bytes serialized_engine = 2; + + // TODO(laigd): consider adding calibration stats, precision_modes, etc. 
+} diff --git a/executor/proto/tensorflow/compiler/tf2xla/host_compute_metadata.proto b/executor/proto/tensorflow/compiler/tf2xla/host_compute_metadata.proto new file mode 100644 index 0000000000..43ab371a21 --- /dev/null +++ b/executor/proto/tensorflow/compiler/tf2xla/host_compute_metadata.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package tensorflow.tf2xla; +option cc_enable_arenas = true; +option java_outer_classname = "Tf2XlaProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.tf2xla"; + +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +// TensorMetadata indicates the type and shape of a Tensor that is +// part of a host compute transfer. +message TensorMetadata { + DataType type = 1; + TensorShapeProto shape = 2; +} + +// HostTransferMetadata describes a transfer either from host to device +// or device to host. It has a key that is unique to the computation, +// and metadata about the list of tensors being transferred. +message HostTransferMetadata { + // The key used to identify this transfer. + string key = 1; + + // For each Tensor being transferred, its type and shape. + repeated TensorMetadata metadata = 2; +} + +// HostComputeMetadata describes all the sends and recvs +// from all host compute transfer ops in a computation. 
+message HostComputeMetadata { + // Metadata about each device_to_host transfer + repeated HostTransferMetadata device_to_host = 1; + + // Metadata about each host_to_device transfer + repeated HostTransferMetadata host_to_device = 2; +} diff --git a/executor/proto/tensorflow/compiler/tf2xla/tf2xla.proto b/executor/proto/tensorflow/compiler/tf2xla/tf2xla.proto new file mode 100644 index 0000000000..3093a0b1d8 --- /dev/null +++ b/executor/proto/tensorflow/compiler/tf2xla/tf2xla.proto @@ -0,0 +1,80 @@ +syntax = "proto3"; + +package tensorflow.tf2xla; + +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +option cc_enable_arenas = true; +option java_outer_classname = "Tf2XlaProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.tf2xla"; + +// TensorId identifies a tensor in a TensorFlow graph, by specifying the output +// index of a particular node in the graph. If the output of the named node +// feeds into other node(s), this corresponds to one or more edges. Otherwise +// it doesn't correspond to any existing edges at all, e.g. for output nodes. +message TensorId { + string node_name = 1; + int64 output_index = 2; +} + +// Feed represents a single feed tensor in the graph, which corresponds to an +// input argument for the generated computation. +message Feed { + TensorId id = 1; + TensorShapeProto shape = 2; + string name = 3; // Optional name for generated code. + + // Optional data type. This is not normally required, as the graph itself + // contains this information. However, if the node being fed is an op that is + // not linked into the binary, then the type cannot be inferred from the node; + // in this case, the type should be set here. + DataType type = 4; +} + +// Fetch represents a single fetch tensor in the graph, which corresponds to an +// output argument for the generated computation. 
+message Fetch { + TensorId id = 1; + string name = 2; // Optional name for generated code. + + // Optional shape and data type. If specified, may be used for validation. + TensorShapeProto shape = 3; + DataType type = 4; +} + +// Variable represents a resource variable with the given name, shape and type. +message Variable { + string node_name = 1; + string name = + 2; // Optional name for generated code. If empty, node_name will be used. + TensorShapeProto shape = 3; + DataType type = 4; + + // Flag for variables that are never assigned. Assigments to a read-only + // variable or unassigned variables that are not read-only are invalid. + bool readonly = 5; +} + +// Options used during the conversion and compilation process. +message ConversionOptions { + // When true tf.fake_quant_* ops will be emitted as custom calls to a + // 'fake_quant_with_min_max_vars' function accepting the input, min, max, + // num_bits, and narrow_range values as runtime arguments. + bool custom_fake_quant_op_calls = 1; +} + +// Config represents configuration information for tf2xla conversion. +message Config { + // Each feed is a positional input argument for the generated computation. + // The order of each entry matches the order of each input argument. + repeated Feed feed = 1; + // Each fetch is a positional output argument for the generated computation. + // The order of each entry matches the order of each output argument. + repeated Fetch fetch = 2; + // Each variable is a named input and output of the generated computation. + repeated Variable variable = 3; + // Optional conversion options. + ConversionOptions conversion_options = 4; +} diff --git a/executor/proto/tensorflow/compiler/xla/rpc/xla_service.proto b/executor/proto/tensorflow/compiler/xla/rpc/xla_service.proto new file mode 100644 index 0000000000..0ff8adc2ac --- /dev/null +++ b/executor/proto/tensorflow/compiler/xla/rpc/xla_service.proto @@ -0,0 +1,151 @@ +/* Copyright 2018 The TensorFlow Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// XLA service API. +// +// Users 1) build up computations and 2) create allocations via this API. +// Computations are composed of data flowing between arbitrarily-sized +// vector-oriented operations. +// +// Users build up computations using a ComputationHandle, and talk about +// allocations using GlobalDataHandles. +// +// There are currently no checkpointing capabilities or distribution/replication +// guarantees. The service runs on a single machine (e.g. one task) and that is +// its failure domain. +// +// Canonical example of "alpha * X + Y": +// * Make a computation. +// * Add alpha and X and Y as parameters. +// * Request the multiplication of alpha and X. +// * Request the addition of that result and Y. +// +// Then, pass the computation and appropriately shaped inputs to the XLA +// service's Execute method, which provides a result as a GlobalDataHandle. +// +// All data in XLA computations are conceptually immutable. +// +// Note: this API is subject to change / refinement over time -- use the +// provided client libraries to insulate code from changes to this service API. + +syntax = "proto3"; + +import "tensorflow/compiler/xla/xla.proto"; + +package xla; + +service XlaService { + ///////////////////////// + // Global data requests + + // Unregisters a global allocation. 
+ // + // If the handle given is not currently allocated, a NOT_FOUND status is + // returned. + rpc Unregister(UnregisterRequest) returns (UnregisterResponse) { + } + + // Deconstructs a tuple. Returns a newly created GlobalDataHandle for each + // element in the tuple. + rpc DeconstructTuple(DeconstructTupleRequest) + returns (DeconstructTupleResponse) { + } + + // Unpack requests that a global data handle, with a tuple shape, has global + // data handles created for each of its constituent members. This is the + // equivalent of the "destructuring assignment" present in various programming + // languages. + rpc Unpack(UnpackRequest) returns (UnpackResponse) { + } + + // Requests the shape of the referenced global data. + rpc GetShape(GetShapeRequest) returns (GetShapeResponse) { + } + + // Requests the statistics of the given computation. + rpc GetComputationGraphStats(ComputationGraphStatsRequest) + returns (ComputationStatsResponse) { + } + + // Loads a variable number of values with a given element type from ColumnIO. + rpc LoadData(LoadDataRequest) returns (LoadDataResponse) { + } + + // Transfers the given global data to the client in the form of a Literal. + rpc TransferToClient(TransferToClientRequest) + returns (TransferToClientResponse) { + } + + // Transfers the given literal to the server to be stored in a global + // allocation, which is returned. + rpc TransferToServer(TransferToServerRequest) + returns (TransferToServerResponse) { + } + + // Transfers the given literal to the Infeed buffer of the device. + rpc TransferToInfeed(TransferToInfeedRequest) + returns (TransferToInfeedResponse) { + } + + // Transferred literal from the Outfeed buffer of the device. + rpc TransferFromOutfeed(TransferFromOutfeedRequest) + returns (TransferFromOutfeedResponse) { + } + + // Resets the device, clearing all existing state on the device. + rpc ResetDevice(ResetDeviceRequest) returns (ResetDeviceResponse) { + } + + // Computes the value of a constant expression. 
The request contains the + // computation graph for the constant expression. + rpc ComputeConstantGraph(ComputeConstantGraphRequest) + returns (ComputeConstantResponse) { + } + + // Requests one or more device handles from the target. The returned device + // handles can be used to specify the device on which to execute computations + // or transfer data. + rpc GetDeviceHandles(GetDeviceHandlesRequest) + returns (GetDeviceHandlesResponse) { + } + + // Creates a channel handle that can be used to transfer data between + // two computations via a pair of Send and Recv instructions. + rpc CreateChannelHandle(CreateChannelHandleRequest) + returns (CreateChannelHandleResponse) { + } + + // Compiles the provided computation into executable. Returns the handle of + // the executable. + rpc Compile(CompileRequest) returns (CompileResponse) {} + + // Invokes the provided executable with the provided global data passed as + // immutable arguments. The request contains the handle to the executable. + // Returns global data output and execution timing. + rpc Execute(ExecuteRequest) returns (ExecuteResponse) {} + + // Invokes the provided list of computations in parallel with the provided + // global data for each computation. Returns a list of global data output and + // execution timing. + rpc ExecuteGraphParallel(ExecuteGraphParallelRequest) + returns (ExecuteParallelResponse) { + } + + // Waits until the given execution (aysnchronously launched) is complete, and + // returns the global data output. 
+ rpc WaitForExecution(WaitForExecutionRequest) + returns (WaitForExecutionResponse) { + } +} diff --git a/executor/proto/tensorflow/compiler/xla/service/gpu/backend_configs.proto b/executor/proto/tensorflow/compiler/xla/service/gpu/backend_configs.proto new file mode 100644 index 0000000000..d0795ca66b --- /dev/null +++ b/executor/proto/tensorflow/compiler/xla/service/gpu/backend_configs.proto @@ -0,0 +1,62 @@ +syntax = "proto3"; + +package xla.gpu; + +import "tensorflow/compiler/xla/xla_data.proto"; + +// Backend configs for XLA:GPU. +// +// These are metadata that the GPU backend attaches to HloInstrucitons and later +// uses during e.g. codegen. +// +// Remember that proto3 doesn't give clients a way to tell the difference +// between a field not being present and a field having the default value. +// Choose your defaults carefully. +// +// No guarantee is made about the stability of these protos. +// +// See HloInstruction::backend_config() for more info. + +// Backend config for a convolution that runs through cudnn. +message CudnnConvBackendConfig { + // Opaque algorithm number of cudnn algorithm chosen for this conv. + int64 algorithm = 1; + + // Whether we may use tensor cores when running this conv. Even if this is + // true, cudnn may choose not to use tensor cores, e.g. because the GPU or + // selected algorithm doesn't support it. + bool tensor_ops_enabled = 2; + + // The scaling factor multiplied with the convolution result. + double conv_result_scale = 4; + + // Below are the fields related to cuDNN's fused convolution. Refer to + // CudnnConvParams for their meanings. + + // The requested activation (e.g. relu) after the convolution. It is with type + // stream_executor::dnn::ActivationMode. + int64 activation_mode = 3; + + // The scaling factor multiplied with the side input. If no side input buffer + // is provided, this field must be 0. + double side_input_scale = 5; +} + +// Backend config for the GEMM operation running through cuBLAS. 
+message GemmBackendConfig { + // Opaque optional algorithm number. No chosen number indicates that a + // different cuBLAS API will be used, which does not allow for choosing an + // algorithm. + oneof algorithm { + int64 selected_algorithm = 1; + } + + double alpha_real = 2; + double alpha_imag = 9; + + double beta = 3; + + xla.DotDimensionNumbers dot_dimension_numbers = 7; + + int64 batch_size = 8; +} diff --git a/executor/proto/tensorflow/compiler/xla/service/gpu/gpu_autotuning.proto b/executor/proto/tensorflow/compiler/xla/service/gpu/gpu_autotuning.proto new file mode 100644 index 0000000000..35b5cfacb2 --- /dev/null +++ b/executor/proto/tensorflow/compiler/xla/service/gpu/gpu_autotuning.proto @@ -0,0 +1,33 @@ +// This is used for convolution logging. Also see +// tensorflow/core/protobuf/autotuing.h +syntax = "proto3"; + +package xla.gpu; + +import "tensorflow/compiler/xla/service/hlo.proto"; +import "tensorflow/compiler/xla/xla_data.proto"; +import "tensorflow/core/protobuf/autotuning.proto"; + +message ConvInstructionLog { + xla.HloInstructionProto instruction = 1; + repeated xla.ShapeProto operand_shapes = 2; + uint64 result_address = 3; + repeated uint64 operand_addresses = 4; +} + +message BlacklistedAlgorithm { + int64 id = 1; + bool tensor_ops = 2; +} + +message AlgorithmBlacklistEntry { + string hlo = 1; + tensorflow.ComputeCapability cc = 2; + tensorflow.CudnnVersion cudnn_version = 3; + string blas_version = 5; + repeated BlacklistedAlgorithm algos = 4; +} + +message AlgorithmBlacklist { + repeated AlgorithmBlacklistEntry entries = 1; +} diff --git a/executor/proto/tensorflow/compiler/xla/service/hlo.proto b/executor/proto/tensorflow/compiler/xla/service/hlo.proto new file mode 100644 index 0000000000..4dd6d09675 --- /dev/null +++ b/executor/proto/tensorflow/compiler/xla/service/hlo.proto @@ -0,0 +1,497 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// This proto file defines messages which represent the HLO module. This is a +// full fidelity serialization of the c++ HLO constructs. +// +// Many of the protos below are simple 1-to-1 serializations of the +// corresponding C++ classes, e.g., HloModule, HloComputation, and +// HloInstruction. +// +// FIELD NAMES ARE IMPORTANT +// +// Unlike most protos, you can't safely change the names of fields, even if you +// keep the numeric ids the same. This is because we sometimes serialize these +// protos as JSON, which includes the field names in the serialization. + +syntax = "proto3"; + +package xla; + +import "tensorflow/compiler/xla/xla_data.proto"; + +option cc_enable_arenas = true; + +// Serialization of HloInstruction. +// Next ID: 68 +message HloInstructionProto { + reserved 10; + reserved "parameter_name"; + reserved 12; + reserved "fused_instructions_computation"; + reserved 4; + reserved "operand_names"; + reserved 5; + reserved "control_predecessor_names"; + reserved 6; + reserved "called_computation_names"; + reserved 44; + reserved "replica_group_ids"; + // Use backend_config instead for custom_call_opaque. + reserved 53; + reserved "custom_call_opaque"; + // Use backend_config instead for all_reduce_barrier. 
+ reserved 46; + reserved "all_reduce_barrier"; + + string name = 1; + string opcode = 2; + xla.ShapeProto shape = 3; + + xla.OpMetadata metadata = 7; + + // Literal, only present for kConstant. + xla.LiteralProto literal = 8; + + // Parameter number is only present for kParameter. + int64 parameter_number = 9; + + // Fusion state, only present for kFusion. + string fusion_kind = 11; + + // Index for kGetTupleElement. + int64 tuple_index = 13; + + // Dimensions present for some operations that require reshaping or + // broadcasting, including Reshape, Reduce, ReduceWindow, and Reverse. + repeated int64 dimensions = 14; + + // Describes the window in a windowed operation such as convolution. + xla.Window window = 15; + + // Describes the dimension numbers used for a convolution. + xla.ConvolutionDimensionNumbers convolution_dimension_numbers = 16; + + // The number of feature groups. Used for a convolution. Must be a divisor of + // the input feature dimension and output feature dimension. If not specified, + // it will use a default value of 1. + int64 feature_group_count = 50; + + int64 batch_group_count = 58; + + // Describes the [begin, end) index range and stride for slices. + message SliceDimensions { + int64 start = 1; + int64 limit = 2; + int64 stride = 3; + } + repeated SliceDimensions slice_dimensions = 17; + + // The bit sizes for a reduce-precision operation. + int32 exponent_bits = 18; + int32 mantissa_bits = 19; + + // Describes the [start, start + size) range size for a dynamic slice + // ('start' is specified dynamically in the second operand of the operation). + repeated int64 dynamic_slice_sizes = 20; + + // The padding configuration that describes the edge padding and interior + // padding of this pad instruction. Only set for pad instructions. + xla.PaddingConfig padding_config = 21; + + // Outfeed configuration information, only present for kOutfeed. + bytes outfeed_config = 22; + + // The distribution requested for random number generation. 
+ // Only present for kRng. + xla.RandomDistribution distribution = 23; + + // A small float number added to the variance to avoid divide-by-zero error. + // Only present for kBatchNormTraining. + float epsilon = 24; + + // An integer value representing the index of the feature dimension. + // Only present for kBatchNormTraining. + int64 feature_index = 25; + + // Represents a unique identifier for each Send/Recv instruction pair or + // optionally for collective instructions (AllReduce, CollectivePermute, + // AllToAll). Non-positive channel_id is equivalent to no channel id. + int64 channel_id = 26; + + // The string representation of the infeed configuration. + bytes infeed_config = 27; + + // Name of a external target (eg, global symbol) to call, only present for + // kCustomCall. + string custom_call_target = 28; + + // Shape of outfeed request. + xla.ShapeProto outfeed_shape = 29; + + // Describes the dimension numbers used for a dot operation + xla.DotDimensionNumbers dot_dimension_numbers = 30; + + // FFT type (FFT, IFFT, etc). + xla.FftType fft_type = 31; + + // FFT length. + repeated int64 fft_length = 32; + + // Comparison direction only used for kCompare. + string comparison_direction = 63; + + // Gather dimension numbers. + xla.GatherDimensionNumbers gather_dimension_numbers = 33; + repeated int64 gather_slice_sizes = 34; + + // Compute Host. + string channel_name = 41; + int64 cost_estimate_ns = 42; + + // The id of this instruction. + int64 id = 35; + + repeated int64 operand_ids = 36; + repeated int64 control_predecessor_ids = 37; + repeated int64 called_computation_ids = 38; + + xla.OpSharding sharding = 40; + + // Backend configuration for the instruction. Has backend-specific meaning. + string backend_config = 43; + + // Cross replica op fields. + repeated ReplicaGroup replica_groups = 49; + // Deprecated, but keeping it for backward compatibility. Use channel_id. + // Non-positive all_reduce_id is equivalent to no all_reduce_id. 
+ int64 all_reduce_id = 45 [deprecated = true]; + + // Whether this Send/Recv instruction transfers data to/from the host. Only + // present for Send and Recv instructions and their SendDone and RecvDone + // partners. + bool is_host_transfer = 47; + + // Whether this Sort instruction should be stable. + bool is_stable = 60; + + xla.ScatterDimensionNumbers scatter_dimension_numbers = 48; + + // Precision configuration for the instruction. Has backend-specific meaning. + xla.PrecisionConfig precision_config = 51; + + // Collective permute field. + repeated SourceTarget source_target_pairs = 52; + + // Sharding for kDomain instructions. + xla.OpSharding domain_entry_sharding = 54; + xla.OpSharding domain_exit_sharding = 55; + + // For custom call this indicates that the layouts are constrained. If + // constrain_layout is true then the 'shape' field must contain a layout, and + // 'operand_shapes_with_layout' must contain a shape with layout for each + // operand. + bool constrain_layout = 56; + repeated xla.ShapeProto operand_shapes_with_layout = 57; + + // Options for TriangularSolve + xla.TriangularSolveOptions triangular_solve_options = 59; + + // Options for Cholesky + xla.CholeskyOptions cholesky_options = 62; + + // Describes how parameters behave with regards to replicas. + xla.ParameterReplication parameter_replication = 61; + + // If set, the given instruction is run in parallel on e.g. multiple CPU + // cores. The outermost dimension gets split up into + // outer_dimension_partitions[0] pieces, the next-outermost dim gets split + // into outer_dimension_partitions[1] pieces, etc. + // + // It's illegal to partition a dimension into more shards than there are + // elements in that dimension. + repeated int64 outer_dimension_partitions = 64; + + // Whether the kCustomCall instruction has side-effects, only present for + // kCustomCall. + bool custom_call_has_side_effect = 65; + + // The delta value for kRngGetAndUpdateState. 
+ int64 delta = 66; + + // Specifies if the gather/scatter indices are guaranteed to be sorted by the + // caller. + bool indices_are_sorted = 67; +} + +// Serialization of HloComputation. +message HloComputationProto { + reserved 3; + reserved "root_name"; + + string name = 1; + + // The array of instructions is always in a valid dependency order, where + // operands appear before their users. + repeated HloInstructionProto instructions = 2; + + // The program shape (with layout) of this computation. + + xla.ProgramShapeProto program_shape = 4; + + // The id of this computation. + int64 id = 5; + + // The id of the root of the computation. + int64 root_id = 6; +} + +// Serialization of an HLO schedule. An HLO schedule contains a total order of +// instructions for each non-fusion computation in the module. +message HloScheduleProto { + message InstructionSequence { + repeated int64 instruction_ids = 1; + } + + // Map from computation id to sequence. + map sequences = 1; +} + +message HloInputOutputAliasProto { + enum Kind { + // Define a UNDEFINED_ALIAS equal to zero to get around the default-0 proto3 + // behavior and missing has_*() APIs. + UNDEFINED_ALIAS = 0; + // An alias setup by the user as must alias. A use setting USER_ALIAS is + // expecting the designed output to be dropped over the given input + // parameter number+index. + USER_ALIAS = 1; + // An alias setup by the compiler as part of its optimizations. + SYSTEM_ALIAS = 2; + } + + // The following proto describes a pair of aliased an input + // (described by parameter number and a ShapeIndex of the parameter) + // and an output (described by a ShapeIndex of the root + // instruction). For example: + // + // entry = { + // output_shape_index={1}, + // parameter_number=0, + // parameter_shape_index={1, 2}, + // } + // + // This entry indicates that the first paremter's {1, 2} element is + // aliased with the {1} element of the root instruction. 
+ message AliasEntryProto { + // ShapeIndex of the root hlo. + repeated int64 output_shape_index = 1; + // Number of the parameter in entry computation. + int64 parameter_number = 2; + // ShapeIndex of the parameter instruction. + repeated int64 parameter_shape_index = 3; + // The kind of alias to be setup. + Kind kind = 4; + } + + repeated AliasEntryProto entries = 1; +} + +message DynamicParameterBindingProto { + // A list of bindings which indicates that the `target_dim_num` in + // the subshape `target_param_index` of parameter `target_param_num` + // is a dynamic dimension and its real dynamic size is represented + // by `dynamic_param_index` in parameter `dynamic_param_num`. + // + // As an example, imagine we have a program: + // + // ENTRY main { + // a = f32[] parameter(0) + // b = f32[10] parameter(1) + // ROOT root = (f32[], f32[10]) tuple(%a, %b) + // } + // + // Let's say 'b' (param index 1) is a dynamic shape whose input has + // an upperbound of 10 and real size is determined at runtime.'a' + // represents the real size of b's first dimension. + // + // In this case, the fields are set in the following way: + // dynamic_param_num = 1 + // dynamic_param_index = {} + // target_param_num = 0 + // target_param_index = {} + // target_param_dim = 0 + message Binding { + int64 dynamic_param_num = 1; + repeated int64 dynamic_param_index = 2; + int64 target_param_num = 3; + repeated int64 target_param_index = 4; + int64 target_param_dim_num = 5; + } + + repeated Binding entries = 1; +} + +// Serialization of HloModule. +message HloModuleProto { + string name = 1; + string entry_computation_name = 2; + int64 entry_computation_id = 6; + + // The array of computations is always in a valid dependency order, where + // callees appear before their callers. + repeated HloComputationProto computations = 3; + + // The host program shape (with layout) of the entry computation. + xla.ProgramShapeProto host_program_shape = 4; + + // The id of this module. 
+ int64 id = 5; + + // The schedule for this module. + HloScheduleProto schedule = 7; + + // Describes alias information between inputs and outputs. + HloInputOutputAliasProto input_output_alias = 8; + + DynamicParameterBindingProto dynamic_parameter_binding = 9; +} + +// Serialization of LogicalBuffer. +message LogicalBufferProto { + // Location represents an instruction and its shape index, which uniquely + // identifies a point where a buffer is needed. + message Location { + // NOTE: module_name isn't necessary, since all LogicalBuffers are + // associated with a single HloModule. + string computation_name = 1; + string instruction_name = 2; + repeated int64 shape_index = 3; + } + + int64 id = 1; + int64 size = 2; + + // The location where the buffer is defined. + Location defined_at = 3; + + int64 color = 4; +} + +// Serialization of BufferAllocation. +message BufferAllocationProto { + // Assigned represents a single LogicalBuffer that is assigned to this + // BufferAllocation. + message Assigned { + int64 logical_buffer_id = 1; + int64 offset = 2; + int64 size = 3; + } + + int64 index = 1; + int64 size = 2; + bool is_thread_local = 3; + bool is_tuple = 11; + bool is_entry_computation_parameter = 5; + bool is_constant = 12; + int64 parameter_number = 6; + repeated int64 parameter_shape_index = 10; + bool maybe_live_out = 7; + int64 color = 8; + repeated Assigned assigned = 9; +} + +// A trace of a HeapSimulator run. +message HeapSimulatorTrace { + // The trace includes a list of events, where each event describes one action + // performed by the heap simulator. + message Event { + enum Kind { + ALLOC = 0; // A memory region was allocated for the buffer. + FREE = 1; // A memory region was freed for the buffer. + + // A buffer was shared with another (canonical) buffer. This is similar to + // ALLOC, except that instead of allocating a new region of memory, the + // memory region of the canonical buffer is directly re-used. 
Multiple + // buffers may share with the same canonical buffer. The lifetime of the + // canonical buffer is extended to the union of all lifetimes. + SHARE_WITH = 2; + } + Kind kind = 1; + + // The id of the LogicalBuffer that the event applies to. + int64 buffer_id = 2; + + // The HloInstruction that the simulation was processing that caused this + // event to occur, identified by its computation and instruction name. E.g. + // buffers defined by instruction A are allocated when processing A. + string computation_name = 3; + string instruction_name = 4; + + // The id of the canonical LogicalBuffer that the buffer shares with. Only + // set for SHARE_WITH events. + int64 share_with_canonical_id = 5; + } + repeated Event events = 1; + bool whole_module_simulation = 2; +} + +// An abstraction representing a set of HLO module built to run concurrently +// across different devices. +message HloModuleGroupProto { + string name = 1; + repeated HloModuleProto hlo_modules = 2; +} + +// Serialization of BufferAssignment. +message BufferAssignmentProto { + // Alias represents a source LogicalBuffer, and the buffer location that + // aliases it. + message BufferAlias { + int64 source_buffer_id = 1; + LogicalBufferProto.Location location = 2; + } + + repeated LogicalBufferProto logical_buffers = 1; + repeated BufferAlias buffer_aliases = 2; + repeated BufferAllocationProto buffer_allocations = 3; + repeated HeapSimulatorTrace heap_simulator_traces = 4; +} + +// Grouping message that contains all of the information above. +message HloProto { + reserved 2; + reserved "hlo_ordering"; + + HloModuleProto hlo_module = 1; + BufferAssignmentProto buffer_assignment = 3; +} + +// Encapsulates HloProto together with the arguments, result, and +// execution_platform. This message is used for purposes such as +// analysis/replay/file-storage. +message HloSnapshot { + // The hlo graph. + HloProto hlo = 1; + + // The arguments passed to the graph. 
+ repeated LiteralProto arguments = 2; + + // The result of the graph. + LiteralProto result = 3; + + // The name of the platform used to run the graph. + string execution_platform = 4; +} diff --git a/executor/proto/tensorflow/compiler/xla/service/hlo_profile_printer_data.proto b/executor/proto/tensorflow/compiler/xla/service/hlo_profile_printer_data.proto new file mode 100644 index 0000000000..ee66c86ffc --- /dev/null +++ b/executor/proto/tensorflow/compiler/xla/service/hlo_profile_printer_data.proto @@ -0,0 +1,66 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package xla; + +option cc_enable_arenas = true; + +// Describes how to pretty-print a profile counter array gathered for a specific +// HloModule. +message HloProfilePrinterData { + // Pretty-printer information about an HloInstruction. + message HloInstructionInfo { + string long_name = 1; + string short_name = 2; + string category = 3; + + // Metrics computed by HloCostAnalysis. + float flop_count = 4; + float transcendental_count = 5; + float bytes_accessed = 6; + float optimal_seconds = 7; + + // The index into the profile counters array for the HloInstruction + // corresponding to this HloInstructionInfo. + int64 profile_index = 8; + } + + // Pretty-printer information about an HloComputation. 
+ message HloComputationInfo { + string name = 1; + + // The index into the profile counters array for the HloComputation + // corresponding to this HloComputationInfo. + int64 profile_index = 2; + + // HloInstructionInfos for every HloInstruction in the HloComputation for + // corresponding to this HloComputattionInfo. + repeated HloInstructionInfo instruction_infos = 3; + } + + // HloComputationInfos for every HloComputation in the HloModule. + repeated HloComputationInfo computation_infos = 1; + + // The size of the profile counters array we will pretty-print. + int64 profile_counters_size = 2; + + // Maps extra metric name to the index into the profile counters array. + map extra_metrics = 3; + + // Name of the entry computation. + string entry_computation = 4; +} diff --git a/executor/proto/tensorflow/compiler/xla/xla.proto b/executor/proto/tensorflow/compiler/xla/xla.proto new file mode 100644 index 0000000000..09c6c793a2 --- /dev/null +++ b/executor/proto/tensorflow/compiler/xla/xla.proto @@ -0,0 +1,539 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package xla; + +import "tensorflow/compiler/xla/service/hlo.proto"; +import "tensorflow/compiler/xla/xla_data.proto"; + +// Options for the HLO insert-reduce-precision-operations pass. 
+message HloReducePrecisionOptions { + // Where and when the reduce-precision operations will be added. + enum Location { + // Add reduce-precision operations to the inputs of selected instructions. + // This is done before any optimization occurs. + OP_INPUTS = 0; + // Add reduce-precision operations to the outputs of selected instructions. + // This is done before any optimization occurs. + OP_OUTPUTS = 1; + // After operation-fusion occurs, add reduce-precision operations to the + // outputs of any selected instructions that have not been fused into + // fusion instructions. + UNFUSED_OP_OUTPUTS = 2; + // After operation-fusion occurs, add reduce-precision operations to the + // outputs of any fusion instructions that contain operations matching the + // selection criteria. + FUSION_INPUTS_BY_CONTENT = 3; + // After operation-fusion occurs, add reduce-precision operations to the + // outputs of any fusion instructions that contain operations matching the + // selection criteria. + FUSION_OUTPUTS_BY_CONTENT = 4; + } + Location location = 1; + + // Exponent and mantissa bit counts for the reduced precision. + uint32 exponent_bits = 2; + uint32 mantissa_bits = 3; + + // Operations matching these opcodes should be suffixed with reduce-precision + // operations. + repeated uint32 opcodes_to_suffix = 4; + + // Operations with names containing these substrings should be suffixed with + // reduce-precision operations. + repeated string opname_substrings_to_suffix = 5; +} + +// Debugging options for XLA. These options may change at any time - there are +// no guarantees about backward or forward compatibility for these fields. +message DebugOptions { + // Show addresses of HLO ops in graph dump. + bool xla_hlo_graph_addresses = 2; + + // Instrument the computation to collect per-HLO cycle counts. + bool xla_hlo_profile = 9; + + // List of HLO passes to disable/enable. These names must exactly match the + // pass names as specified by the HloPassInterface::name() method. 
+ // + // At least one of xla_disable_hlo_passes and xla_enable_hlo_passes_only must + // be empty. + repeated string xla_disable_hlo_passes = 30; + repeated string xla_enable_hlo_passes_only = 124; + + // Disables all HLO passes. Notes that some passes are necessary for + // correctness and the invariants that must be satisfied by "fully optimized" + // HLO are different for different devices and may change over time. The only + // "guarantee", such as it is, is that if you compile XLA and dump the + // optimized HLO for some graph, you should be able to run it again on the + // same device with the same build of XLA. + bool xla_disable_all_hlo_passes = 104; + + // Numerical optimization level for the XLA compiler backend; the specific + // interpretation of this value is left to the backends. + int32 xla_backend_optimization_level = 31; + + // Embed the compiler IR as a string in the executable. + bool xla_embed_ir_in_executable = 33; + + // Eliminate implicit broadcasts when lowering user computations to HLO + // instructions; use explicit broadcast instead. + bool xla_eliminate_hlo_implicit_broadcast = 35; + + // When generating calls to Eigen in the CPU backend, use multi-threaded Eigen + // mode. + bool xla_cpu_multi_thread_eigen = 60; + + // Path to directory with cuda/ptx tools and libraries. + string xla_gpu_cuda_data_dir = 61; + + // Enable flush-to-zero semantics in the GPU backend. + bool xla_gpu_ftz = 62; + + // Disable multi-streaming in the GPU backend. + bool xla_gpu_disable_multi_streaming = 63; + + // If true, in LLVM-based backends, emit !alias.scope metadata in + // generated IR. + bool xla_llvm_enable_alias_scope_metadata = 70; + + // If true, in LLVM-based backends, emit !noalias metadata in the + // generated IR. + bool xla_llvm_enable_noalias_metadata = 71; + + // If true, in LLVM-based backends, emit !invariant.load metadata in + // the generated IR. 
+ bool xla_llvm_enable_invariant_load_metadata = 72; + + // If true, a set of expensive LLVM optimization passes will not be run. + bool xla_llvm_disable_expensive_passes = 73; + + // Options for inserting reduce-precision operations for numerical + // experimentation. This is a repeated field, as we may want to have + // multiple passes with different parameters. + repeated HloReducePrecisionOptions hlo_reduce_precision_options = 80; + + // This is used by ClientLibraryTestBase::ComputeAndCompare*. If true, the + // computation will run n! times with all permunations of layouts for the + // output shape in rank n. For example, with a 3D shape, all permutations of + // the set {0, 1, 2} are tried. + bool xla_test_all_output_layouts = 90; + + // This is used by ClientLibraryTestBase::ComputeAndCompare*. If true, the + // computation will run for all permunations of layouts of all input + // arguments. For example, with 2 input arguments in 2D and 4D shapes, the + // computation will run 2! * 4! times. + bool xla_test_all_input_layouts = 91; + + // Assign colors based on sharding information when generating the Graphviz + // HLO graph. + bool xla_hlo_graph_sharding_color = 92; + + reserved 93; // Was xla_hlo_tfgraph_device_scopes + + // If true, the GPU backend is free to use cudnn for HLO batch normalization + // ops. + bool xla_gpu_use_cudnn_batchnorm = 94; + + // Generate calls to MKL-DNN in the CPU backend. + bool xla_cpu_use_mkl_dnn = 97; + + // Maximum kernel unroll factor for the GPU backend. + int32 xla_gpu_max_kernel_unroll_factor = 98; + + // When true, "unsafe" mathematical optimizations are enabled. These + // transformations include but are not limited to: + // + // - Reducing the precision of operations (e.g. using an approximate sin + // function, or transforming x/y into x * (1/y)). + // - Assuming that operations never produce or consume NaN or +/- Inf (this + // behavior can be adjusted using xla_cpu_fast_math_allow_{nans|infs}). 
+ // - Assuming that +0 and -0 are indistinguishable. + bool xla_cpu_enable_fast_math = 99; + + // When xla_cpu_enable_fast_math is true then this controls whether we allow + // operations to produce NaNs. Ignored when xla_cpu_enable_fast_math is + // false. + bool xla_cpu_fast_math_honor_nans = 120; + + // When xla_cpu_enable_fast_math is true then this controls whether we allow + // operations to produce infinites. Ignored when xla_cpu_enable_fast_math is + // false. + bool xla_cpu_fast_math_honor_infs = 121; + + // When xla_cpu_enable_fast_math is true then this controls whether we forbid + // to use the reciprocal of an argument instead of division. Ignored when + // xla_cpu_enable_fast_math is false. + bool xla_cpu_fast_math_honor_division = 126; + + // When xla_cpu_enable_fast_math is true then this controls whether we forbid + // to approximate calculations for functions. Ignored when + // xla_cpu_enable_fast_math is false. + bool xla_cpu_fast_math_honor_functions = 129; + + // When true we lower the Minimum and Maximum hlos in the GPU backend such + // that Min(NotNaN, NaN) = Min(NaN, NotNaN) = NotNaN. In other words, if flag + // this is true we don't propagate NaNs through Min and Max. + bool xla_gpu_enable_fast_min_max = 100; + + // Allows xla to increase the output precision of floating point operations. + bool xla_allow_excess_precision = 122; + + // Crashes the program when any kind of verification fails, instead of just + // logging the failures. One example is cross checking of convolution results + // among different algorithms. + bool xla_gpu_crash_on_verification_failures = 101; + + // Disable GEMM and Convolution auto-tuning. + bool xla_gpu_disable_autotune = 123; + + // Force the host platform to pretend that there are these many host + // "devices". All these devices are backed by the same threadpool. Defaults + // to 1. 
+ // + // Setting this to anything other than 1 can increase overhead from context + // switching but we let the user override this behavior to help run tests on + // the host that run models in parallel across multiple devices. + int32 xla_force_host_platform_device_count = 102; + + // If set to true XLA:GPU invokes `ptxas` with -O0 (default is -O3). + bool xla_gpu_disable_ptxas_optimizations = 103; + + // Enable fast math with eigen in the HLO evaluator. + bool xla_hlo_evaluator_use_fast_path = 106; + + // Temporary option to allow support for both the R1 and the scalar index + // versions of DynamicSlice and DynamicUpdateSlice. Only used for testing. + bool xla_allow_scalar_index_dynamic_ops = 107; + + enum StepMarkerLocation { + // Generate a step marker at the program entry. This handles the case where + // each step is done by one or multiple program execution(s). Only the first + // program will be tagged for generating a step marker at the program entry. + // This is the default. + STEP_MARK_AT_ENTRY = 0; + // Generate a step marker at each iteration of the top level while loop, + // which is assumed to be a training loop. + STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP = 1; + // Generate a step marker at each iteration of the second level while loops, + // which is assumed to be a training or eval loop. + STEP_MARK_AT_SECOND_LEVEL_WHILE_LOOP = 3; + // No step marker generated. + STEP_MARK_NONE = 2; + } + // Option to emit a target-specific marker to indicate the start of a training + // step. The location of the marker (if any) is determined by the option + // value. + StepMarkerLocation xla_step_marker_location = 108; + + // + // BEGIN flags controlling dumping HLO modules for debugging. + // + // When dumping is enabled, HLO modules dumped at the very beginning and end + // of compilation, and optionally also during the pass pipeline. + // + // In general, if you set one of these flags, we will try to infer reasonable + // defaults for the others. 
For example: + // + // * Setting --xla_dump_to=/tmp/foo without specifying a format + // with --xla_dump_hlo_as_* will turn on --xla_dump_hlo_as_text. + // + // * Setting --xla_dump_hlo_as_text without specifying --xla_dump_to will + // dump to stdout. + // + + // Directory to dump into. + string xla_dump_to = 109; + + // If specified, will only dump modules which match this regexp. + string xla_dump_hlo_module_re = 110; + + // If this flag is specified, will also HLO before and after passes that match + // this regular expression. Set to .* to dump before/after all passes. + string xla_dump_hlo_pass_re = 111; + + // Specifies the format that HLO is dumped in. Multiple of these may be + // specified. + bool xla_dump_hlo_as_text = 112; + bool xla_dump_hlo_as_proto = 113; + bool xla_dump_hlo_as_dot = 114; + bool xla_dump_hlo_as_url = 115; + + // Dump HLO graphs as an HTML (DOT -> SVG inlined in HTML) + bool xla_dump_hlo_as_html = 116; + + // If true, every time an HLO module is run, we will dump an HloSnapshot + // (essentially, a serialized module plus its inputs) to the --xla_dump_to + // directory. + bool xla_dump_hlo_snapshots = 118; + + // + // END flags controlling dumping HLO modules. + // + + bool xla_gpu_force_conv_nchw = 125; + + // Paths to files with ptx code. + repeated string xla_gpu_ptx_file = 127; + + // Blacklist for cuDNN convolutions. + string xla_gpu_algorithm_blacklist_path = 128; + + // Next id: 130 + + // Extra options to pass to the compilation backend (e.g. LLVM); specific + // interpretation of these values is left to the backend. + map xla_backend_extra_options = 500; + + reserved 117; // was xla_dump_to + reserved 5; // Was xla_hlo_dump_as_graphdef +} + +// These settings control how XLA compiles and/or runs code. Not all settings +// will have an effect on every platform. +// +// When adding new fields, keep in mind that boolean fields default to false. 
+message ExecutionOptions { + // This optional field's layout is used as a hint when storing the output of + // this computation. Subsequent transfers of this output array to the client + // may be faster when using this layout. + // + // We use a Shape here to accommodate computations that return a tuple. + ShapeProto shape_with_output_layout = 2; + + // Used to seed random-number generators used in this computation. If this is + // 0, we generate a seed ourselves. + // + // TODO(b/32083678): Changing the seed unnecessarily forces a recompilation. + uint64 seed = 3; + + DebugOptions debug_options = 4; + + // This optional field specifies a particular set of devices to run the + // computation on. The computation will be partitioned across these devices. + // If not provided, the default device will be chosen. + repeated DeviceHandle device_handles = 5; + + // Number of replicas of the computation to run. If zero, uses the default + // number of replicas for the XLA service. + int32 num_replicas = 6; + + // This optional field specifies the device assignment if known at compile + // time. + DeviceAssignmentProto device_assignment = 7; +} + +message GetDeviceHandlesRequest { + int64 device_count = 1; +} + +message GetDeviceHandlesResponse { + repeated DeviceHandle device_handles = 1; +} + +message TransferToClientRequest { + GlobalDataHandle data = 1; + + // This optional field directs the service to return the literal in this + // layout. A shape is used to hold the layout to accommodate tuples. 
+ ShapeProto shape_with_layout = 2; +} + +message TransferToClientResponse { + LiteralProto literal = 1; +} + +message TransferToServerRequest { + LiteralProto literal = 1; + DeviceHandle device_handle = 2; +} + +message TransferToServerResponse { + GlobalDataHandle data = 1; +} + +message TransferToInfeedRequest { + LiteralProto literal = 1; + int64 replica_id = 2; + DeviceHandle device_handle = 3; +} + +message TransferToInfeedResponse {} + +message TransferFromOutfeedRequest { + // This optional field directs the service to return the literal in this + // layout. A shape is used to hold the layout to accommodate tuples. + ShapeProto shape_with_layout = 1; + + int64 replica_id = 2; + DeviceHandle device_handle = 3; +} + +message TransferFromOutfeedResponse { + LiteralProto literal = 1; +} + +message ResetDeviceRequest { + DeviceHandle device_handle = 1; +} + +message ResetDeviceResponse {} + +message ComputationGraphStatsRequest { + HloModuleProto computation = 1; + DebugOptions debug_options = 2; +} + +message ComputationStatsResponse { + ComputationStats stats = 1; +} + +message CreateChannelHandleRequest { + ChannelHandle.ChannelType channel_type = 1; +} + +message CreateChannelHandleResponse { + ChannelHandle channel = 1; +} + +message UnregisterRequest { + repeated GlobalDataHandle data = 1; +} + +message UnregisterResponse {} + +message CompileRequest { + // The graph to be compiled. + HloModuleProto computation = 1; + + // Options that affect how XLA compiles code to service this request. + ExecutionOptions execution_options = 2; + + // The layouts of the input arguments. If not set, the default layout will be + // used. Although the real arguments are not needed in compilation, the + // layouts of the arguments can affect the compilation. + repeated ShapeProto input_shape_with_layout = 3; +} + +message CompileResponse { + // The handle to the executable. 
+ ExecutionHandle handle = 1; +} + +message ExecuteRequest { + ExecutionHandle handle = 1; + + // The shape and layout of the arguments must be the same as the those of the + // executable's parameters. + repeated GlobalDataHandle arguments = 2; +} + +// TODO(b/118493728): Remove this and ExecuteGraphParallelRequest and replace +// the uses with calls to Compile and Execute. +message ExecuteGraphRequest { + HloModuleProto computation = 1; + repeated GlobalDataHandle arguments = 2; + + // Options that affect how XLA compiles and runs code to service this request. + ExecutionOptions execution_options = 3; +} + +message ExecuteGraphParallelRequest { + repeated ExecuteGraphRequest requests = 1; +} + +message ExecuteResponse { + GlobalDataHandle output = 1; + ExecutionProfile profile = 2; +} + +message ExecuteParallelResponse { + repeated ExecuteResponse responses = 1; +} + +message WaitForExecutionRequest { + ExecutionHandle execution = 1; +} + +message WaitForExecutionResponse { + GlobalDataHandle output = 1; + ExecutionProfile profile = 2; +} + +message ComputeConstantGraphRequest { + HloModuleProto computation = 1; + LayoutProto output_layout = 2; +} + +message ComputeConstantResponse { + // A LiteralProto is returned directly for this request. + LiteralProto literal = 1; +} + +message DeconstructTupleRequest { + GlobalDataHandle tuple_handle = 2; +} + +message DeconstructTupleResponse { + repeated GlobalDataHandle element_handles = 1; +} + +message LoadDataRequest { + // Describes the path of the ColumnIO tablet to load. + string columnio_tablet_path = 1; + + // Describes the field to load within the ColumnIO tablet. + string columnio_field = 2; + + // Individual element shape, excluding rows. + ShapeProto element_shape = 3; + + // Warning: ColumnIO does not support random-access, so use offset with + // caution in performance-critical scenarios. + int64 offset = 4; + + // Maximum number of elements (with shape element_shape) to load. 
+ int64 limit = 5; + + // If more than one item is requested (via limit > 1), then this request + // attribute zips together the produced vectors. + bool zip = 6; +} + +message LoadDataResponse { + GlobalDataHandle data = 1; + ShapeProto data_shape = 2; + int64 available_rows = 3; + int64 rows_loaded = 4; + int64 nanoseconds = 5; +} + +message GetShapeRequest { + GlobalDataHandle data = 1; +} + +message GetShapeResponse { + ShapeProto shape = 1; +} + +message UnpackRequest { + GlobalDataHandle data = 1; +} + +message UnpackResponse { + repeated GlobalDataHandle tied_data = 1; +} diff --git a/executor/proto/tensorflow/compiler/xla/xla_data.proto b/executor/proto/tensorflow/compiler/xla/xla_data.proto new file mode 100644 index 0000000000..120be3d86c --- /dev/null +++ b/executor/proto/tensorflow/compiler/xla/xla_data.proto @@ -0,0 +1,673 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package xla; + +option cc_enable_arenas = true; + +// Primitive types are the individual values that can be held in rectangular +// multidimensional arrays. A description of the rectangular multidimensional +// array dimensions / primitive type is given by Shape, below. +enum PrimitiveType { + // Invalid primitive type to serve as default. + PRIMITIVE_TYPE_INVALID = 0; + + // Predicates are two-state booleans. 
+ PRED = 1; + + // Signed integral values of fixed width. + S8 = 2; + S16 = 3; + S32 = 4; + S64 = 5; + + // Unsigned integral values of fixed width. + U8 = 6; + U16 = 7; + U32 = 8; + U64 = 9; + + // Floating-point values of fixed width. + // + // Note: if f16s are not natively supported on the device, they will be + // converted to f16 from f32 at arbirary points in the computation. + F16 = 10; + F32 = 11; + + // Truncated 16 bit floating-point format. This is similar to IEEE's 16 bit + // floating-point format, but uses 1 bit for the sign, 8 bits for the exponent + // and 7 bits for the mantissa. + BF16 = 16; + + F64 = 12; + + // Complex values of fixed width. + C64 = 15; // Paired F32 (real, imag), as in std::complex. + C128 = 18; // Paired F64 (real, imag), as in std::complex. + + // A tuple is a polymorphic sequence; e.g. a shape that holds different + // sub-shapes. They are used for things like returning multiple values from a + // computation; e.g. a computation that returns weights and biases may have a + // signature that results in a tuple like (f32[784x2000], f32[2000]) + // + // If a shape proto has the tuple element type, it may not have any entries + // in the dimensions field. + TUPLE = 13; + + // An opaque type used for passing context-specific data to a custom + // operation. Shapes of this primitive type will have empty dimensions and + // tuple_shapes fields. + // + // (OPAQUE would be a better name for this identifier, but that conflicts with + // a macro defined in windows.h.) + OPAQUE_TYPE = 14; + + // A token type threaded between side-effecting operations. Shapes of this + // primitive type will have empty dimensions and tuple_shapes fields. + TOKEN = 17; + + // Next = 19 +} + +// Describes the padding configuration for Pad operation. The padding amount on +// both edges as well as between the elements are specified for each dimension. +message PaddingConfig { + // Describes the padding configuration for a dimension. 
+ message PaddingConfigDimension { + // Padding amount on the low-end (next to the index 0). May be negative. + int64 edge_padding_low = 1; + + // Padding amount on the high-end (next to the highest index). May be + // negative. + int64 edge_padding_high = 2; + + // Padding amount between the elements. May not be negative. + int64 interior_padding = 3; + } + + // The padding configuration for all dimensions. + repeated PaddingConfigDimension dimensions = 1; +} + +// A format specifies the method used by a layout to store an array in memory. +enum Format { + // TODO(b/120869032): Rename this to FORMAT_NONE or something else which + // better corresponds to its meaning. + INVALID_FORMAT = 0; + // The default layout, with exactly one storage location per element. + DENSE = 1; + // A sparsely encoded layout, providing only the index/value pairs of non-zero + // elements. + SPARSE = 2; +} + +// Describes a tile used in tiling-based layout. Refer to +// g3doc/third_party/tensorflow/compiler/xla/g3doc/layout_with_tiling.md for +// details about tiling-based layout. +message TileProto { + // Number of elements in each dimension of the tile. It's ordered from the + // most major dimension of the tile to the most minor dimension of the tile. + // The dimensions correspond to a suffix of the dimensions of the shape being + // tiled. + repeated int64 dimensions = 1; +} + +// A layout describes how the array is placed in (1D) memory space. This +// includes the minor-to-major ordering of dimensions within a shape. +// +// Clients must specify the layouts of input Literals to the +// computation. Layouts specified in interior operations which take Shapes (for +// example, Convert) are ignored. +// +// See the XLA documentation for more information on shapes and layouts. +// +// LINT.IfChange +message LayoutProto { + // The method used to store the data in memory. The format determines which of + // the other fields are used by the layout. 
+ Format format = 4; + + // Sequence of dimension numbers, from minor (fastest varying index) to major + // (slowest varying index). This field is required. + repeated int64 minor_to_major = 1; + + reserved 2; + reserved "padded_dimensions"; + + reserved 3; + reserved "padding_value"; + + // The maximum number of elements that can be stored for SPARSE formats. This + // can be used to determine the maximum size in bytes of arrays stored in + // memory. This field must be unset unless the format is SPARSE. + int64 max_sparse_elements = 5; + + // A sequence of tiles, starting from the tile that's applied first to the + // Shape. + // + // TODO(b/119839262): implement tiling in each backend or add Unimplemented + // error. + repeated TileProto tiles = 6; + + // Bit size of each element. If the size is bigger than what the element + // type requires, the value is stored in the least significant + // bits and the additional most significant bits are filled with 0's. + // + // TODO(b/119839262): implement in each backend or add Unimplemented error. + int64 element_size_in_bits = 7; + + // Memory space where this array resides. The integer field is interpreted in + // a backend-specific manner. + int64 memory_space = 8; + + // Important: if any field is added, be sure to modify ShapeUtil::Equal() and + // LayoutUtil::Hash appropriately to account for the new field. +} +// LINT.ThenChange( \ +// https://www.tensorflow.org/code/tensorflow/compiler/xla/shape_util.cc, \ +// https://www.tensorflow.org/code/tensorflow/compiler/xla/layout_util.cc) + +// A shape describes the number of dimensions in the array, the size of each +// dimension, and the primitive component type. +// +// Tuples are a special case in that they have rank zero and have tuple_shapes +// defined. +// +// See the XLA documentation for more information on shapes and layouts. +// +// LINT.IfChange +message ShapeProto { + reserved 1; + reserved "rank"; + + // The element type for this shape. 
+ PrimitiveType element_type = 2; + + // The size (number of elements) for each dimension, or an upper bound on the + // size if the dimension is dynamic. In XLA, dimensions are numbered from 0 + // to N-1 for an N-dimensional array. The first element of 'dimensions' is the + // size of dimension 0, the second element is the size of dimension 1, and so + // forth. Empty list indicates a scalar. + // + // If the respective element in 'is_dimension_dynamic' is true then the value + // in this field represents an upper bound on the size of the dimension. + repeated int64 dimensions = 3; + + // For tuples only, the shapes of constituent shapes in the tuple sequence. + repeated ShapeProto tuple_shapes = 4; + + // The layout used to back this shape. + LayoutProto layout = 5; + + // For arrays, this indicates whether or not each dimension is + // dynamically-sized. The number of elements in this repeated field should be + // zero (indicating that no dimensions are dynamic) or equal to the number of + // elements in the 'dimensions' field. + repeated bool is_dynamic_dimension = 6; + + // Important: if any field is added, be sure to modify ShapeUtil::Equal(), + // ShapeUtil::Compatible() and ShapeUtil::Hash() appropriately to account for + // the new field. +} +// LINT.ThenChange( \ +// https://www.tensorflow.org/code/tensorflow/compiler/xla/shape_util.cc) + +// Shape of the parameters and output of a computation (like a traditional +// function signature). +message ProgramShapeProto { + repeated ShapeProto parameters = 1; + ShapeProto result = 2; + repeated string parameter_names = 3; +} + +// Statistics of a computation. +message ComputationStats { + // The number of floating point operations in the computation. + double flop_count = 1; + + // The number of transcendental operations (e.g., exp) in the computation. + double transcendental_count = 2; +} + +// Symbolization metadata for HLO Instructions. 
+// +// This metadata is used for debugging XLA code generation, as well as +// performance profiling of XLA-generated executables. +message OpMetadata { + // The framework op name that generated this XLA op. + // + // Frameworks that build on top of XLA should mirror the names of their ops + // back to users by specifying the op_type. In this way, even if the + // framework's "ops" are implemented as multiple XLA HLO Ops, they can be + // grouped appropriately. (e.g. if a SoftMax layer is emitted into XLA as + // multiple ops, then each op should have the op_type be "SoftMax".) + string op_type = 1; + // The user-specified name of the op. + // + // This name is often unique within a computation. Note: some frameworks + // add auto-generated names if the user does not provide one. + string op_name = 2; + // Indicate a file and line that this op is associated to in a user's program. + // + // e.g. it could be the file and line of user code that generated the op. + string source_file = 3; + int32 source_line = 4; +} + +// Profile data from the execution of a computation. +message ExecutionProfile { + // Whether the executable was read from the compilation cache. + bool compilation_cache_hit = 1; + + // The time in milliseconds spent to compile the computation. This only set if + // the executable was not read from the compilation cache + // (compilation_cache_hit == false). + int64 compile_time_ms = 2; + + // The number of cycles spent for the computation. This does not include the + // time taken for the data transfers between the host and the device. This is + // a target-dependent field and only used for debugging purposes. + int64 compute_cycle_count = 3; + + // The time in nanoseconds spent for the computation, without data transfer. + int64 compute_time_ns = 4; + + // The time in nanoseconds spent for the entire computation, including the + // result data transfer time. 
Current implementation does not spend any cycles + // for the input data transfer since the memory is initialized with the proper + // values before the execution. + int64 compute_and_transfer_time_ns = 5; + + // The size of the binary code in the executable. + int64 executable_size_in_bytes = 6; + + // Whether this profile was drawn from a cache of profiles instead of from + // execution on the hardware. + bool profile_cache_hit = 7; +} + +// Handle given to a user that represents an execution that the user launched +// asynchronously on the device. +message ExecutionHandle { + int64 handle = 1; +} + +// Handle given to a user that represents a globally accessible allocation. +// Contrast this against a ComputationDataHandle, which is not globally +// accessible, since it only exists within a specific computation. +message GlobalDataHandle { + int64 handle = 1; +} + +// Handle given to a user that represents a replicated virtual device. Each +// replicated device represents N physical devices for execution where N is the +// number of replicas. +message DeviceHandle { + int64 handle = 1; + + // The number of model-parallel virtual devices that communicate via XLA + // Send/Recv instructions. + int64 device_count = 2; +} + +// Handle given to a user to represent a channel between two computations +// via a Send and Recv instruction pair. Channels are unbuffered, so Send +// Send instructions will be blocked until the data is transferred. +message ChannelHandle { + int64 handle = 1; + enum ChannelType { + // Invalid primitive type to serve as default. + CHANNEL_TYPE_INVALID = 0; + + // A channel for sending data between devices. + DEVICE_TO_DEVICE = 1; + + // A channel for sending data from the device to the host. Can only be used + // with a Send operation. + DEVICE_TO_HOST = 2; + + // A channel for sending data from the host to the device. Can only be used + // with a Recv operation. 
+ HOST_TO_DEVICE = 3; + } + ChannelType type = 2; +} + +// DeviceAssignmentProto is a serialized form of DeviceAssignment class, which +// represents the device ids assigned to a set of replicated computations. +// See xla::DeviceAssignment class comment for more details. +message DeviceAssignmentProto { + int32 replica_count = 1; + int32 computation_count = 2; + + // Each logical computation runs on replica_count physical devices. + // ComputationDevice represents the device ids assinged to the replicas. + message ComputationDevice { + repeated int32 replica_device_ids = 1; + } + repeated ComputationDevice computation_devices = 3; +} + +// Literals are used when the server and client need to exchange materialized +// data / results. Literals are also used to describe constants used in +// computations. +// +// Transfers to/from the client are encoded in literal form, and the structure +// of the repeated fields is implied by the shape. +message LiteralProto { + ShapeProto shape = 1; + repeated bool preds = 2; + bytes s8s = 15; + bytes u8s = 3; + repeated int32 s32s = 4; + repeated int64 s64s = 5; + repeated uint32 u32s = 6; + repeated uint64 u64s = 7; + repeated float f32s = 8; + repeated double f64s = 9; + repeated float c64s = 12; // Stored as interleaved real, imag floats. + repeated double c128s = 18; // Stored as interleaved real, imag doubles. + repeated LiteralProto tuple_literals = 10; + // The F16s, BF16s, U16s and S16s are encoded in little endian byte order + bytes f16s = 11; + bytes bf16s = 13; + bytes u16s = 16; + bytes s16s = 17; + repeated int64 sparse_indices = 14; + // Next = 19 +} + +message WindowDimension { + // The size of the window in this dimension. For a rectangle, this would be + // the width or height. + int64 size = 1; + + // The stride at which the window moves across the base area in this + // dimension. In other words, this is the spacing between different + // positions of the window in this dimension. 
+ int64 stride = 2; + + // If positive, means the amount of padding to add to the base area at the low + // end of this dimension; if negative, its negative means the number of + // elements removed from the low end of this dimension. For example, in the + // horizontal dimension of a rectangle, this would be the number of padding + // values to pad on the left, given that indices increase when going right. + // The actual padding value depends upon the context. Convolution pads with + // zeros. ReduceWindow and SelectAndScatter pads with the reduce function's + // init value. + int64 padding_low = 3; + + // As padding_low, but on the high end of this dimension. For example, in the + // horizontal dimension of a rectangle, this would be the number of values to + // pad on the right, given that indices increase when going right. + int64 padding_high = 4; + + // Dilation factor of the sliding window in this dimension. A dilation factor + // of 1 means no dilation. window_dilation - 1 no-op entries ("holes") are + // implicitly placed between each kernel element. This value may not be less + // than 1. See documentation for convolution. + int64 window_dilation = 5; + + // Dilation factor of the base area in this dimension. A dilation factor of 1 + // means no dilation. base_dilation - 1 no-op entries ("holes") are implicitly + // placed between each base area element. This value may not be less than 1. + // See documentation for convolution. + int64 base_dilation = 6; + + // Window reversal means that this dimension was logically reversed before the + // operation. + bool window_reversal = 7; +} + +// Describes the windowing in an operation such as convolution. +// +// The window is moved across a base area and for each position of the +// window a computation is performed. The field below describes the +// window and the movement of the window across a base area. 
+message Window { + repeated WindowDimension dimensions = 1; +} + +// Describes the dimension numbers for a gather operation. +// +// See https://www.tensorflow.org/performance/xla/operation_semantics#gather for +// more details. +message GatherDimensionNumbers { + // "Window indices" is a term for a set of indices that index into the + // interior of a dynamic-slice from the input tensor, the starting indices for + // which were computed from output_gather_dims (see the operation semantic for + // how this is defined) and the start_indices tensor. + // + // The window indices for a specific output index Out is computed as: + // + // i = 0 + // for (k : [0, input_tensor_shape.rank)) + // window_indices[k] = + // if k in collapsed_slice_dims + // then 0 + // else Out[offset_dims[i++]] + repeated int64 offset_dims = 1; + repeated int64 collapsed_slice_dims = 2; + + // This is interpreted as a map from i to start_index_map[i]. It + // transforms the gather index looked up from the start_indices tensor into + // the starting index in the input space. + repeated int64 start_index_map = 3; + + // The dimension in the start_indices input that contains the starting + // indices. + int64 index_vector_dim = 4; +} + +// Describes the dimension numbers for a scatter operation. +// +// All the fields are similar to the corresponding fields in +// GatherDimensionNumbers. Differences are noted below. +message ScatterDimensionNumbers { + // The set of dimensions in the updates shape that are window dimensions. + repeated int64 update_window_dims = 1; + // The set of window dimensions that must be inserted into the updates shape. + repeated int64 inserted_window_dims = 2; + + repeated int64 scatter_dims_to_operand_dims = 3; + int64 index_vector_dim = 4; +} + +message ConvolutionDimensionNumbers { + // The number of the dimension that represents batch in the input. + int64 input_batch_dimension = 7; + + // The number of the dimension that represents features in the input. 
+ int64 input_feature_dimension = 8; + + // The dimension numbers for the spatial dimensions that the window + // moves through in the input. + repeated int64 input_spatial_dimensions = 11; + + // The number of the dimension that represents input features in the + // convolutional kernel (rhs). + int64 kernel_input_feature_dimension = 3; + + // The number of the dimension that represents output features in + // the convolutional kernel (rhs). + int64 kernel_output_feature_dimension = 4; + + // The dimension numbers for the spatial dimensions that the window + // moves through in the kernel (rhs). window.strides(0) is the + // stride in the kernel_spatial_dimensions(0) dimension. + repeated int64 kernel_spatial_dimensions = 6; + + // The number of the dimension that represents batch in the output. + int64 output_batch_dimension = 9; + + // The number of the dimension that represents features in the output. + int64 output_feature_dimension = 10; + + // The dimension numbers for the spatial dimensions that the window + // moves through in the output. + repeated int64 output_spatial_dimensions = 12; + + // Next = 13 +} + +enum FftType { + FFT = 0; // Forward FFT; complex in, complex out. + IFFT = 1; // Inverse FFT; complex in, complex out. + RFFT = 2; // Forward real FFT; real in, fft_length / 2 + 1 complex out + IRFFT = 3; // Inverse real FFT; fft_length / 2 + 1 complex in, + // fft_length real out +} + +message DotDimensionNumbers { + // The dimension numbers that represent the 'lhs' contracting dimensions. + repeated int64 lhs_contracting_dimensions = 1; + // The dimension numbers that represent the 'rhs' contracting dimensions. + repeated int64 rhs_contracting_dimensions = 2; + // The dimension numbers that represent the 'lhs' batch dimensions. + repeated int64 lhs_batch_dimensions = 3; + // The dimension numbers that represent the 'rhs' batch dimensions. 
+ repeated int64 rhs_batch_dimensions = 4; +} + +enum RandomDistribution { + RNG_INVALID = 0; + + // Creates a uniform-distribution-generated random number on the semi-open + // interval [parameter[0], parameter[1]). + RNG_UNIFORM = 1; + + // Creates a normal-distribution-generated random number with mean + // parameter[0] and standard deviation parameter[1]. + RNG_NORMAL = 2; + + // Next: 4 +} + +message TriangularSolveOptions { + // If true, solves ax = b. If false, solves xa = b. + bool left_side = 1; + + // If true, 'a' is lower triangular. If false, 'a' is upper triangular. + bool lower = 2; + + // If true, the diagonal elements of 'a' are assumed to be 1 and not accessed. + bool unit_diagonal = 3; + + // Should we transpose or use the adjoint of 'a'? + enum Transpose { + TRANSPOSE_INVALID = 0; + NO_TRANSPOSE = 1; // Don't transpose 'a'. + TRANSPOSE = 2; // Transpose 'a'. + ADJOINT = 3; // Complex conjugate and transpose 'a'. + }; + Transpose transpose_a = 4; +} + +message CholeskyOptions { + // If true, uses the lower triangle of `a`. If false, uses the upper triangle + // of `a`. + bool lower = 1; +} + +message OpSharding { + enum Type { + // This sharding is replicated across all devices (implies maximal, + // all other fields are unused). + REPLICATED = 0; + // This sharding is maximal - one device runs the entire operation. + MAXIMAL = 1; + // This sharding is a tuple - only the tuple_shardings field is valid. + TUPLE = 2; + // None of the above; tile_shape and tile_assignment are both used. + OTHER = 3; + } + Type type = 1; + // The shape of the sharded tile. + ShapeProto tile_shape = 2; + // The shape of the tile assignment tensor - this must be the same rank as + // tile_shape and the product of its dimensions must equal + // tile_assignment_devices.size(). + repeated int64 tile_assignment_dimensions = 3; + // Flattened list of device IDs. The order of flattening is the same as used + // by IndexUtil::MultiToLinearIndex(tile_assignment_shape). 
+ repeated int64 tile_assignment_devices = 4; + // If type == TUPLE, the sub-shardings, one per leaf node in the tuple shape, + // in pre-order. The tuple shape could be nested; here we store just a + // flattened list of all leaves in the tuple shape. Note that the tuple shape + // is not stored here; shardings do not store the shapes to which they are + // applied, this is inferred from the instruction this sharding gets attached + // to. + repeated OpSharding tuple_shardings = 5; +} + +// Describes the replica groups in a cross replica op (e.g., all-reduce and +// all-to-all). +message ReplicaGroup { + // The ids of the replicas that belongs to the same group. The ordering of the + // ids matters in some ops (e.g., all-to-all). + repeated int64 replica_ids = 1; +} + +// Describes the source target pair in the collective permute op. +message SourceTarget { + int64 source = 1; + int64 target = 2; +} + +// Used to indicate the precision configuration. It has backend specific +// meaning. +message PrecisionConfig { + enum Precision { + DEFAULT = 0; + HIGH = 1; + HIGHEST = 2; + + // Next: 3 + } + repeated Precision operand_precision = 1; + + // Next: 2 +} + +// Describes whether all data-parallelism replicas will receive the same +// parameter data at each buffer. +message ParameterReplication { + // A list of boolean values for the flattened leaf buffers. Each value + // indicates whether the corresponding leaf buffer is replicated. + // + // If this field is empty, it means no buffer is replicated. Otherwise, the + // number of elements in this field must match the number of leaf buffers in + // the HLO instruction's shape. + repeated bool replicated_at_leaf_buffers = 1; +} + +// A backend-config for kWhile loops that stores the loop's trip count, if it is +// known. +// +// This is useful for backends that can implement a `for i in 0..N` loop more +// efficiently than a `while` loop. 
For example, on GPUs, we can implement a +// `for i in 0..N` loop by enqueueing the kernels for the loop body N times, +// whereas implementing a `while` loop requires a host-device sync on each +// iteration. +message WhileLoopBackendConfig { + message KnownTripCount { + int64 n = 1; + } + // This indirection lets us distinguish between known-trip-count == 0 and + // unknown-trip-count. + KnownTripCount known_trip_count = 1; +} diff --git a/executor/proto/tensorflow/compiler/xrt/xrt.proto b/executor/proto/tensorflow/compiler/xrt/xrt.proto new file mode 100644 index 0000000000..a598b80032 --- /dev/null +++ b/executor/proto/tensorflow/compiler/xrt/xrt.proto @@ -0,0 +1,166 @@ +syntax = "proto3"; + +package xrt; + +import "tensorflow/compiler/tf2xla/host_compute_metadata.proto"; +import "tensorflow/compiler/xla/service/hlo.proto"; +import "tensorflow/compiler/xla/xla.proto"; +import "tensorflow/compiler/xla/xla_data.proto"; + +message DeviceAssignment { + message ComputationDevice { + message DeviceMeshCoordinates { + // The mesh coordinates for the device. Usually (X, Y, Core), in the order + // in which they are returned in the TopologyProto. + // X = value(0) + // Y = value(1) + // Core = value(2) + repeated int32 value = 1; + } + // As many replicas as there are in the replicated computation. + repeated DeviceMeshCoordinates replica_devices = 1; + } + // As many ComputationDevice as many there are computations (number + // of cores per replica). + repeated ComputationDevice computation_devices = 1; +} + +// Options for an XLA compilation. +message XLAComputationConfig { + // The number of replicas the computation will be run on. If this is + // default (0) it is interpreted as 1. + int32 num_replicas = 1; + // The number of "model-parallel" cores per replica. If this is + // default (0) it is interpreted as 1. + int32 num_cores_per_replica = 2; + // Optional metadata about host sends and recvs. 
+ tensorflow.tf2xla.HostComputeMetadata host_compute_metadata = 3; + + // The arg/result shapes for the whole computation. + xla.ProgramShapeProto program_shape = 4; + // The arg/result shapes for each core of a model-parallel + // computation. per_core_args_and_result_shapes is optional for a + // single-core computation. + repeated xla.ProgramShapeProto per_core_program_shape = 5; + // Describes how replicated computation instances should be assigned to + // devices. There are num_cores_per_replica computations, and each one will be + // sent and executed to the set of replica device numbers described in the + // DeviceAssignment proto. + DeviceAssignment device_assignment = 6; + // The debugging options to be passed to the XLA compilation process. + xla.DebugOptions debug_options = 7; +} + +// Options and XLA computation for a compilation. +message XLAComputation { + XLAComputationConfig config = 1; + xla.HloSnapshot hlo_snapshot = 2; +} + +// Literal to allocate space for, and transfer to, device memory. +message XLAAllocation { + reserved 1; + xla.LiteralProto value = 2; +} + +// Node in a tree describing a tuple constructed from input handles. A +// node is an internal node if tuples is non-empty, in which case +// input_index and release_input_handle are ignored. Otherwise a node +// is a leaf node. Each leaf XLATupleNode is the index of an input +// which corresponds to a handle that will be grafted onto the output +// tuple at that location. If release_input_handle is true that input +// handle will be released and become invalid. Inputs may be repeated +// in which case leaves of the output tuple will alias. If an input is +// repeated, release_input_handle must be false for every leaf where +// that input appears. +// +// For example, if input 0 has shape {} and input 1 has shape {2,3} +// then the XLATupleNode with structure {1,{0,1}} corresponds to a +// tuple with shape {{2,3},{{},{2,3}}}. 
+message XLATupleNode { + int32 input_index = 1; + bool release_input_handle = 2; + repeated XLATupleNode tuples = 3; +} + +// Options for an XLA execution. +message XRTExecutionConfig { + // Local device to run on. This is present because the execute Op + // may be placed on a device such as CPU or TPU_SYSTEM that + // logically manages multiple cores. + int32 device_ordinal = 1; + // Which model-parallel computation to run from the compiled bundle. + int32 core_index_in_replica = 2; + // Optional key to disambiguate between executions. This is only + // needed if multiple host send/recvs may be outstanding + // concurrently with executions. + string execution_instance_key = 3; + // If non-zero, rng_seed to reset the core with. + uint32 rng_seed = 4; + // If true, release allocation handles on the inputs after running. + bool release_input_handles = 5; + // If true, release the handle to the computation after running. + bool release_compilation_handle = 6; + // If set to true, and the result shape is a tuple, then instead of returning + // a single tuple allocation the execution will return a vector of + // allocations, one for each of the first-level elements of the result tuple. + bool return_exploded_tuple = 7; +} + +message XRTChainedExecuteConfig { + // If non-zero, rng_seed to reset the core with. + uint32 rng_seed = 1; + // Which model-parallel computation to run from the compiled bundle. + int32 core_index_in_replica = 2; + // Optional key to disambiguate between executions. This is only needed if + // multiple host send/recvs may be outstanding concurrently with executions. + string execution_instance_key = 3; +} + +// A single chained execute operation. An operation can either be a device data +// load, or an existing (as in, previously compiled and accessible via its int64 +// handle) XLA computation execution. +message XRTChainedExecuteOp { + // Represents an input for this operation. 
+ message Input { + // The index within the XRTChainedExecutePlan.ops post-order of the source + // operation for this input. + int64 op_index = 1; + // The output index of the value generated by the operation at op_index. + // Zero (default value) means no index ({}) while if an indexing is + // required, output_index needs to be set to index+1. + // Thanks proto3! + int64 output_index = 2; + } + // Represents an output of the XRTChainedExecute operation, which should + // originate by the output of this operation. + message Output { + // The index in the value generated by this operation, which should be + // forwarded as XRTChainedExecute output. If output_index is zero (default + // value) the whole output will be used as result. This means that if the + // output shape is a tuple, the result will be the full tuple. Otherwise the + // real sub-tuple index will be output_index - 1. + int64 output_index = 1; + // The index in the vector of the results returned by the XRTChainedExecute + // operation, where this output should be forwarded. + int64 result_index = 2; + } + + oneof op_oneof { + // The handle to an existing XRT device data. + int64 data_handle = 1; + // The handle to an existing XRT compiled computation. + int64 computation_handle = 2; + } + // The outputs of this XRTChainedExecuteOp operation. + repeated Output outputs = 3; + // The inputs of this XRTChainedExecuteOp operation. If data_handle is set, + // there are no inputs. + repeated Input inputs = 4; +} + +// Execution plan for the XRTChainedExecute operation. +message XRTChainedExecutePlan { + // The post order with the XRT computations to be executed. 
+ repeated XRTChainedExecuteOp ops = 1; +} diff --git a/executor/proto/tensorflow/contrib/boosted_trees/proto/learner.proto b/executor/proto/tensorflow/contrib/boosted_trees/proto/learner.proto new file mode 100644 index 0000000000..fc5f158c07 --- /dev/null +++ b/executor/proto/tensorflow/contrib/boosted_trees/proto/learner.proto @@ -0,0 +1,161 @@ +syntax = "proto3"; + +package tensorflow.boosted_trees.learner; + +import "tensorflow/contrib/boosted_trees/proto/tree_config.proto"; + +option cc_enable_arenas = true; + +// Tree regularization config. +message TreeRegularizationConfig { + // Classic L1/L2. + float l1 = 1; + float l2 = 2; + + // Tree complexity penalizes overall model complexity effectively + // limiting how deep the tree can grow in regions with small gain. + float tree_complexity = 3; +} + +// Tree constraints config. +message TreeConstraintsConfig { + // Maximum depth of the trees. The default value is 6 if not specified. + uint32 max_tree_depth = 1; + + // Min hessian weight per node. + float min_node_weight = 2; + + // Maximum number of unique features used in the tree. Zero means there is no + // limit. + int64 max_number_of_unique_feature_columns = 3; +} + +// LearningRateConfig describes all supported learning rate tuners. +message LearningRateConfig { + oneof tuner { + LearningRateFixedConfig fixed = 1; + LearningRateDropoutDrivenConfig dropout = 2; + LearningRateLineSearchConfig line_search = 3; + } +} + +// Config for a fixed learning rate. +message LearningRateFixedConfig { + float learning_rate = 1; +} + +// Config for a tuned learning rate. +message LearningRateLineSearchConfig { + // Max learning rate. Must be strictly positive. + float max_learning_rate = 1; + + // Number of learning rate values to consider between [0, max_learning_rate). + int32 num_steps = 2; +} + +// When we have a sequence of trees 1, 2, 3 ... 
n, these essentially represent
+// weights updates in functional space, and thus we can use averaging of weight
+// updates to achieve better performance. For example, we can say that our final
+// ensemble will be an average of ensembles of tree 1, and ensemble of tree 1
+// and tree 2 etc .. ensemble of all trees.
+// Note that this averaging will apply ONLY DURING PREDICTION. The training
+// stays the same.
+message AveragingConfig {
+  oneof config {
+    float average_last_n_trees = 1;
+    // Between 0 and 1. If set to 1.0, we are averaging ensembles of tree 1,
+    // ensemble of tree 1 and tree 2, etc ensemble of all trees. If set to 0.5,
+    // last half of the trees are averaged etc.
+    float average_last_percent_trees = 2;
+  }
+}
+
+message LearningRateDropoutDrivenConfig {
+  // Probability of dropping each tree in an existing so far ensemble.
+  float dropout_probability = 1;
+
+  // When trees are built after dropout happens, they don't "advance" to the
+  // optimal solution, they just rearrange the path. However you can still
+  // choose to skip dropout periodically, to allow a new tree that "advances"
+  // to be added.
+  // For example, if running for 200 steps with probability of dropout 1/100,
+  // you would expect the dropout to start happening for sure for all iterations
+  // after 100. However you can add probability_of_skipping_dropout of 0.1, this
+  // way iterations 100-200 will include approx 90 iterations of dropout and 10
+  // iterations of normal steps. Set it to 0 if you want to just keep building
+  // the refinement trees after dropout kicks in.
+  float probability_of_skipping_dropout = 2;
+
+  // Between 0 and 1.
+ float learning_rate = 3; +} + +message LearnerConfig { + enum PruningMode { + PRUNING_MODE_UNSPECIFIED = 0; + PRE_PRUNE = 1; + POST_PRUNE = 2; + } + + enum GrowingMode { + GROWING_MODE_UNSPECIFIED = 0; + WHOLE_TREE = 1; + LAYER_BY_LAYER = 2; + } + + enum MultiClassStrategy { + MULTI_CLASS_STRATEGY_UNSPECIFIED = 0; + TREE_PER_CLASS = 1; + FULL_HESSIAN = 2; + DIAGONAL_HESSIAN = 3; + } + + enum WeakLearnerType { + NORMAL_DECISION_TREE = 0; + OBLIVIOUS_DECISION_TREE = 1; + } + + // Number of classes. + uint32 num_classes = 1; + + // Fraction of features to consider in each tree sampled randomly + // from all available features. + oneof feature_fraction { + float feature_fraction_per_tree = 2; + float feature_fraction_per_level = 3; + }; + + // Regularization. + TreeRegularizationConfig regularization = 4; + + // Constraints. + TreeConstraintsConfig constraints = 5; + + // Pruning. POST_PRUNE is the default pruning mode. + PruningMode pruning_mode = 8; + + // Growing Mode. LAYER_BY_LAYER is the default growing mode. + GrowingMode growing_mode = 9; + + // Learning rate. By default we use fixed learning rate of 0.1. + LearningRateConfig learning_rate_tuner = 6; + + // Multi-class strategy. By default we use TREE_PER_CLASS for binary + // classification and linear regression. For other cases, we use + // DIAGONAL_HESSIAN as the default. + MultiClassStrategy multi_class_strategy = 10; + + // If you want to average the ensembles (for regularization), provide the + // config below. + AveragingConfig averaging_config = 11; + + // By default we use NORMAL_DECISION_TREE as weak learner. + WeakLearnerType weak_learner_type = 12; + + // If you want to enforce some splits and allow boosting to figure out the + // rest, you can provide a tree that represents the starting splits for each + // tree in the ensemble. + // Set both each_tree_start and each_tree_start_num_layers. 
+ tensorflow.boosted_trees.trees.DecisionTreeConfig each_tree_start = 13; + int32 each_tree_start_num_layers = 14; +} diff --git a/executor/proto/tensorflow/contrib/boosted_trees/proto/quantiles.proto b/executor/proto/tensorflow/contrib/boosted_trees/proto/quantiles.proto new file mode 100644 index 0000000000..7f872d2aa7 --- /dev/null +++ b/executor/proto/tensorflow/contrib/boosted_trees/proto/quantiles.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +package boosted_trees; + +message QuantileConfig { + // Maximum eps error when computing quantile summaries. + double eps = 1; + // Number of quantiles to generate. + int64 num_quantiles = 2; +} + +message QuantileEntry { + // Value for the entry. + float value = 1; + // Weight for the entry. + float weight = 2; + // We need the minimum and maximum rank possible for this entry. + // Rank is 0.0 for the absolute minimum and sum of the weights for the maximum + // value in the input. + float min_rank = 3; + float max_rank = 4; +} + +message QuantileSummaryState { + repeated QuantileEntry entries = 1; +} + +message QuantileStreamState { + repeated QuantileSummaryState summaries = 1; +} diff --git a/executor/proto/tensorflow/contrib/boosted_trees/proto/split_info.proto b/executor/proto/tensorflow/contrib/boosted_trees/proto/split_info.proto new file mode 100644 index 0000000000..784977af39 --- /dev/null +++ b/executor/proto/tensorflow/contrib/boosted_trees/proto/split_info.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +package tensorflow.boosted_trees.learner; + +import "tensorflow/contrib/boosted_trees/proto/tree_config.proto"; + +// Gathered information for a split node. +message SplitInfo { + // The split node without the child nodes attached. + tensorflow.boosted_trees.trees.TreeNode split_node = 1; + + // Left Leaf node. + tensorflow.boosted_trees.trees.Leaf left_child = 2; + + // Right Leaf node. 
+ tensorflow.boosted_trees.trees.Leaf right_child = 3; +} + +message ObliviousSplitInfo { + tensorflow.boosted_trees.trees.TreeNode split_node = 1; + repeated tensorflow.boosted_trees.trees.Leaf children = 2; + // For each child, children_parent_id stores the node_id of its parent when it + // was a leaf. For the idx-th child it corresponds the idx/2-th + // children_parent_id. + repeated int32 children_parent_id = 3; +} diff --git a/executor/proto/tensorflow/contrib/boosted_trees/proto/tree_config.proto b/executor/proto/tensorflow/contrib/boosted_trees/proto/tree_config.proto new file mode 100644 index 0000000000..520b4f8b11 --- /dev/null +++ b/executor/proto/tensorflow/contrib/boosted_trees/proto/tree_config.proto @@ -0,0 +1,172 @@ +syntax = "proto3"; +option cc_enable_arenas = true; + +package tensorflow.boosted_trees.trees; + +// TreeNode describes a node in a tree. +message TreeNode { + oneof node { + Leaf leaf = 1; + DenseFloatBinarySplit dense_float_binary_split = 2; + SparseFloatBinarySplitDefaultLeft sparse_float_binary_split_default_left = + 3; + SparseFloatBinarySplitDefaultRight sparse_float_binary_split_default_right = + 4; + CategoricalIdBinarySplit categorical_id_binary_split = 5; + CategoricalIdSetMembershipBinarySplit + categorical_id_set_membership_binary_split = 6; + ObliviousDenseFloatBinarySplit oblivious_dense_float_binary_split = 7; + ObliviousCategoricalIdBinarySplit oblivious_categorical_id_binary_split = 8; + } + TreeNodeMetadata node_metadata = 777; +} + +// TreeNodeMetadata encodes metadata associated with each node in a tree. +message TreeNodeMetadata { + // The gain associated with this node. + float gain = 1; + + // The original leaf node before this node was split. + Leaf original_leaf = 2; + + // The original layer of leaves before that layer was converted to a split. + repeated Leaf original_oblivious_leaves = 3; +} + +// Leaves can either hold dense or sparse information. 
+message Leaf { + oneof leaf { + // See third_party/tensorflow/contrib/decision_trees/ + // proto/generic_tree_model.proto + // for a description of how vector and sparse_vector might be used. + Vector vector = 1; + SparseVector sparse_vector = 2; + } +} + +message Vector { + repeated float value = 1; +} + +message SparseVector { + repeated int32 index = 1; + repeated float value = 2; +} + +// Split rule for dense float features. +message DenseFloatBinarySplit { + // Float feature column and split threshold describing + // the rule feature <= threshold. + int32 feature_column = 1; + // If feature column is multivalent, this holds the index of the dimension + // for the split. Defaults to 0. + int32 dimension_id = 5; + float threshold = 2; + + // Node children indexing into a contiguous + // vector of nodes starting from the root. + int32 left_id = 3; + int32 right_id = 4; +} + +// Split rule for sparse float features defaulting left for missing features. +message SparseFloatBinarySplitDefaultLeft { + DenseFloatBinarySplit split = 1; +} + +// Split rule for sparse float features defaulting right for missing features. +message SparseFloatBinarySplitDefaultRight { + DenseFloatBinarySplit split = 1; +} + +// Split rule for categorical features with a single feature Id. +message CategoricalIdBinarySplit { + // Categorical feature column and Id describing + // the rule feature == Id. + int32 feature_column = 1; + int64 feature_id = 2; + + // Node children indexing into a contiguous + // vector of nodes starting from the root. + int32 left_id = 3; + int32 right_id = 4; +} + +// Split rule for categorical features with a set of feature Ids. +message CategoricalIdSetMembershipBinarySplit { + // Categorical feature column and Id describing + // the rule feature โˆˆ feature_ids. + int32 feature_column = 1; + // Sorted list of Ids in the set. + repeated int64 feature_ids = 2; + + // Node children indexing into a contiguous + // vector of nodes starting from the root. 
+ int32 left_id = 3; + int32 right_id = 4; +} + +// Split rule for dense float features in the oblivious case. +message ObliviousDenseFloatBinarySplit { + // Float feature column and split threshold describing + // the rule feature <= threshold. + int32 feature_column = 1; + float threshold = 2; + // We don't store children ids, because either the next node represents the + // whole next layer of the tree or starting with the next node we only have + // leaves. +} + +// Split rule for categorical features with a single feature Id in the oblivious +// case. +message ObliviousCategoricalIdBinarySplit { + // Categorical feature column and Id describing the rule feature == Id. + int32 feature_column = 1; + int64 feature_id = 2; + // We don't store children ids, because either the next node represents the + // whole next layer of the tree or starting with the next node we only have + // leaves. +} + +// DecisionTreeConfig describes a list of connected nodes. +// Node 0 must be the root and can carry any payload including a leaf +// in the case of representing the bias. +// Note that each node id is implicitly its index in the list of nodes. +message DecisionTreeConfig { + repeated TreeNode nodes = 1; +} + +message DecisionTreeMetadata { + // How many times tree weight was updated (due to reweighting of the final + // ensemble, dropout, shrinkage etc). + int32 num_tree_weight_updates = 1; + + // Number of layers grown for this tree. + int32 num_layers_grown = 2; + + // Whether the tree is finalized in that no more layers can be grown. + bool is_finalized = 3; +} + +message GrowingMetadata { + // Number of trees that we have attempted to build. After pruning, these + // trees might have been removed. + int64 num_trees_attempted = 1; + // Number of layers that we have attempted to build. After pruning, these + // layers might have been removed. + int64 num_layers_attempted = 2; + + // Sorted list of column handlers that have been used in at least one split + // so far. 
+ repeated int64 used_handler_ids = 3; +} + +// DecisionTreeEnsembleConfig describes an ensemble of decision trees. +message DecisionTreeEnsembleConfig { + repeated DecisionTreeConfig trees = 1; + repeated float tree_weights = 2; + repeated DecisionTreeMetadata tree_metadata = 3; + + // Metadata that is used during the training. + GrowingMetadata growing_metadata = 4; +} diff --git a/executor/proto/tensorflow/contrib/cloud/kernels/bigquery_table_partition.proto b/executor/proto/tensorflow/contrib/cloud/kernels/bigquery_table_partition.proto new file mode 100644 index 0000000000..2d9d1380db --- /dev/null +++ b/executor/proto/tensorflow/contrib/cloud/kernels/bigquery_table_partition.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package tensorflow; + +// This proto specifies a table partition in BigQuery. +message BigQueryTablePartition { + // [start_index, end_index] specify the boundaries of a partition. + // If end_index is -1, every row starting from start_index is part of the + // partition. + int64 start_index = 1; + int64 end_index = 2; +}; diff --git a/executor/proto/tensorflow/contrib/decision_trees/proto/generic_tree_model.proto b/executor/proto/tensorflow/contrib/decision_trees/proto/generic_tree_model.proto new file mode 100644 index 0000000000..dd80b37f52 --- /dev/null +++ b/executor/proto/tensorflow/contrib/decision_trees/proto/generic_tree_model.proto @@ -0,0 +1,183 @@ +// Generic representation of tree-based models. + +// This proto establishes a shared standard: "fully compatible" projects should +// provide support for all reasonable models expressed through it. Therefore, +// it should be kept as simple as possible, and should never contain +// project-specific design choices. + +// Status: work in progress. This proto can change anytime without notice. 
+
+syntax = "proto3";
+option cc_enable_arenas = true;
+
+package tensorflow.decision_trees;
+
+import "google/protobuf/any.proto";
+import "google/protobuf/wrappers.proto";
+
+// A generic handle for any type of model.
+message Model {
+  oneof model {
+    DecisionTree decision_tree = 1;
+    Ensemble ensemble = 2;
+    google.protobuf.Any custom_model = 3;
+  }
+  repeated google.protobuf.Any additional_data = 4;
+}
+
+message ModelAndFeatures {
+  message Feature {
+    // TODO(jonasz): Remove this field, as it's confusing. Ctx: cr/153569450.
+    FeatureId feature_id = 1 [deprecated = true];
+    repeated google.protobuf.Any additional_data = 2;
+  };
+  // Given a FeatureId feature_id, the feature's description is in
+  // features[feature_id.id.value].
+  map<string, Feature> features = 1;
+  Model model = 2;
+  repeated google.protobuf.Any additional_data = 3;
+}
+
+// An ordered sequence of models. This message can be used to express bagged or
+// boosted models, as well as custom ensembles.
+message Ensemble {
+  message Member {
+    Model submodel = 1;
+    google.protobuf.Int32Value submodel_id = 2;
+    repeated google.protobuf.Any additional_data = 3;
+  }
+  repeated Member members = 100;  // A higher id for more readable printing.
+
+  // The presence of a certain combination_technique indicates how to combine
+  // the outputs of member models in order to compute the ensemble's output.
+  oneof combination_technique {
+    Summation summation_combination_technique = 1;
+    Averaging averaging_combination_technique = 2;
+    google.protobuf.Any custom_combination_technique = 3;
+  }
+  repeated google.protobuf.Any additional_data = 4;
+}
+
+// When present, the Ensemble's output is the sum of member models' outputs.
+message Summation {
+  repeated google.protobuf.Any additional_data = 1;
+};
+
+
+// When present, the Ensemble's output is the average of member models' outputs.
+message Averaging {
+  repeated google.protobuf.Any additional_data = 1;
+};
+
+
+message DecisionTree {
+  repeated TreeNode nodes = 1;
+  repeated google.protobuf.Any additional_data = 2;
+};
+
+
+message TreeNode {
+  // Following fields are provided for convenience and better readability.
+  // Filling them in is not required.
+  google.protobuf.Int32Value node_id = 1;
+  google.protobuf.Int32Value depth = 2;
+  google.protobuf.Int32Value subtree_size = 3;
+
+  oneof node_type {
+    BinaryNode binary_node = 4;
+    Leaf leaf = 5;
+    google.protobuf.Any custom_node_type = 6;
+  }
+
+  repeated google.protobuf.Any additional_data = 7;
+}
+
+
+message BinaryNode {
+  google.protobuf.Int32Value left_child_id = 1;
+  google.protobuf.Int32Value right_child_id = 2;
+  enum Direction {
+    LEFT = 0;
+    RIGHT = 1;
+  }
+  // When left_child_test is undefined for a particular datapoint (e.g. because
+  // it's not defined when feature value is missing), the datapoint should go
+  // in this direction.
+  Direction default_direction = 3;
+  // When a datapoint satisfies the test, it should be propagated to the left
+  // child.
+  oneof left_child_test {
+    InequalityTest inequality_left_child_test = 4;
+    google.protobuf.Any custom_left_child_test = 5;
+  }
+};
+
+// A SparseVector represents a vector in which only certain select elements
+// are non-zero. Maps labels to values (e.g. class id to probability or count).
+message SparseVector {
+  map<int64, Value> sparse_value = 1;
+}
+
+message Vector {
+  repeated Value value = 1;
+}
+
+message Leaf {
+  oneof leaf {
+    // The interpretation of the values held in the leaves of a decision tree
+    // is application specific, but some common cases are:
+    // 1) len(vector) = 1, and the floating point value[0] holds the class 0
+    // probability in a two class classification problem.
+    // 2) len(vector) = 1, and the integer value[0] holds the class prediction.
+    // 3) The floating point value[i] holds the class i probability prediction.
+ // 4) The floating point value[i] holds the i-th component of the + // vector prediction in a regression problem. + // 5) sparse_vector holds the sparse class predictions for a classification + // problem with a large number of classes. + Vector vector = 1; + SparseVector sparse_vector = 2; + } + // For non-standard handling of leaves. + repeated google.protobuf.Any additional_data = 3; +}; + + +message FeatureId { + google.protobuf.StringValue id = 1; + repeated google.protobuf.Any additional_data = 2; +}; + +message ObliqueFeatures { + // total value is sum(features[i] * weights[i]). + repeated FeatureId features = 1; + repeated float weights = 2; +} + + +message InequalityTest { + // When the feature is missing, the test's outcome is undefined. + oneof FeatureSum { + FeatureId feature_id = 1; + ObliqueFeatures oblique = 4; + } + enum Type { + LESS_OR_EQUAL = 0; + LESS_THAN = 1; + GREATER_OR_EQUAL = 2; + GREATER_THAN = 3; + }; + Type type = 2; + Value threshold = 3; +}; + + +// Represents a single value of any type, e.g. 5 or "abc". +message Value { + oneof value { + float float_value = 1; + double double_value = 2; + int32 int32_value = 3; + int64 int64_value = 4; + google.protobuf.Any custom_value = 5; + } +}; diff --git a/executor/proto/tensorflow/contrib/decision_trees/proto/generic_tree_model_extensions.proto b/executor/proto/tensorflow/contrib/decision_trees/proto/generic_tree_model_extensions.proto new file mode 100644 index 0000000000..2a41b321b7 --- /dev/null +++ b/executor/proto/tensorflow/contrib/decision_trees/proto/generic_tree_model_extensions.proto @@ -0,0 +1,30 @@ +// Messages in this file are not part of the basic standard established by +// generic_tree_model.proto (see the toplevel comment in that file). + +syntax = "proto3"; + +package tensorflow.decision_trees; + +import "tensorflow/contrib/decision_trees/proto/generic_tree_model.proto"; + +// Used in generic_tree_model.BinaryNode.left_child_test. 
+// Tests whether the feature's value belongs to the specified list, +// (or does not belong if inverse=True). +// For empty list use ConstResultTest instead. +message MatchingValuesTest { + // When the feature is missing, the test's outcome is undefined. + FeatureId feature_id = 1; + repeated Value value = 2; + bool inverse = 3; +} + +// Used in generic_tree_model.BinaryNode.left_child_test. +// Returns test_result if feature value is not missed. Otherwise +// BinaryNode.default_direction is used. +message ConstResultTest { + FeatureId feature_id = 1; + // value_for_dtype is used to store the type of the feature. The value itself + // should be ignored, only its type is used. + Value value_for_dtype = 2; + bool test_result = 3; +} diff --git a/executor/proto/tensorflow/contrib/gdr/gdr.proto b/executor/proto/tensorflow/contrib/gdr/gdr.proto new file mode 100644 index 0000000000..bd438787c3 --- /dev/null +++ b/executor/proto/tensorflow/contrib/gdr/gdr.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; + +message RemoteMemoryRegion { + string host = 1; + string port = 2; + uint64 addr = 3; + uint32 rkey = 4; + uint32 tensor_key = 5; +} diff --git a/executor/proto/tensorflow/contrib/mpi/mpi_msg.proto b/executor/proto/tensorflow/contrib/mpi/mpi_msg.proto new file mode 100644 index 0000000000..36f1504901 --- /dev/null +++ b/executor/proto/tensorflow/contrib/mpi/mpi_msg.proto @@ -0,0 +1,19 @@ + +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; + +import "tensorflow/core/protobuf/worker.proto"; + + +message MPIRecvTensorResponse { + RecvTensorResponse response = 1; + bool singleSend = 2; + string key = 3; + int64 step_id = 4; + uint64 checksum = 5; +} + + + diff --git a/executor/proto/tensorflow/contrib/mpi_collectives/mpi_message.proto b/executor/proto/tensorflow/contrib/mpi_collectives/mpi_message.proto new file mode 100644 index 0000000000..afbce981ae --- /dev/null +++ 
b/executor/proto/tensorflow/contrib/mpi_collectives/mpi_message.proto @@ -0,0 +1,64 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package tensorflow.contrib.mpi_collectives; + +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +// An MPIRequest is a message sent from a rank greater than zero to the +// coordinator (rank zero), informing the coordinator of an operation that +// the rank wants to do and the tensor that it wants to apply the operation to. +message MPIRequest { + enum RequestType { + ALLREDUCE = 0; + ALLGATHER = 1; + } + + // The request rank is necessary to create a consistent ordering of results, + // for example in the allgather where the order of outputs should be sorted + // by rank. + int32 request_rank = 1; + RequestType request_type = 2; + DataType tensor_type = 3; + string tensor_name = 4; + TensorShapeProto tensor_shape = 5; +}; + +// An MPIResponse is a message sent from the coordinator (rank zero) to a rank +// greater than zero, informing the rank of an operation should be performed +// now. If the operation requested would result in an error (for example, due +// to a type or shape mismatch), then the MPIResponse can contain an error and +// an error message instead. 
Finally, an MPIResponse can be a DONE message (if +// there are no more tensors to reduce on this tick of the background loop) or +// SHUTDOWN if all MPI processes should shut down. +message MPIResponse { + enum ResponseType { + ALLREDUCE = 0; + ALLGATHER = 1; + ERROR = 2; + DONE = 3; + SHUTDOWN = 4; + } + + // Empty if the type is DONE or SHUTDOWN. + ResponseType response_type = 1; + string tensor_name = 2; + + // Empty unless response_type is ERROR. + string error_message = 3; +}; diff --git a/executor/proto/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto b/executor/proto/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto new file mode 100644 index 0000000000..8141466349 --- /dev/null +++ b/executor/proto/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto @@ -0,0 +1,32 @@ +// Test description and protos to work with it. + +syntax = "proto2"; + +package tensorflow.contrib.rpc; + +// A TestCase holds a sequence of values. +message TestCase { + repeated int32 values = 1; +}; + +service TestCaseService { + // Copy input, and increment each entry in 'values' by 1. + rpc Increment(TestCase) returns (TestCase) { + } + + // Sleep forever. + rpc SleepForever(TestCase) returns (TestCase) { + } + + // Sleep forever 50% of the time, return immediately the other 50%. + rpc SometimesSleepForever(TestCase) returns (TestCase) { + } + + // Always fails with InvalidArgument. + rpc AlwaysFailWithInvalidArgument(TestCase) returns (TestCase) { + } + + // Fails with InvalidArgument 50% of the time. + rpc SometimesFailWithInvalidArgument(TestCase) returns (TestCase) { + } +}; diff --git a/executor/proto/tensorflow/contrib/session_bundle/manifest.proto b/executor/proto/tensorflow/contrib/session_bundle/manifest.proto new file mode 100644 index 0000000000..482ed372dc --- /dev/null +++ b/executor/proto/tensorflow/contrib/session_bundle/manifest.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package tensorflow.serving; + +// Signatures of model export. 
+message Signatures { + // Default signature of the graph. + // WARNING(break-tutorial-inline-code): The following code snippet is + // in-lined in tutorials, please update tutorial documents accordingly + // whenever code changes. + Signature default_signature = 1; + + // Named signatures of the graph. + map named_signatures = 2; +}; + +// A binding to a tensor including the name and, possibly in the future, type +// or other metadata. For example, this may specify whether a tensor supports +// batch vs single inference. +message TensorBinding { + // The name of the tensor to bind to. + string tensor_name = 1; +}; + +// An asset file or set of sharded files with the same name that will be bound +// to a tensor at init / session_bundle load time. +message AssetFile { + // The tensor to bind the asset filename to. + TensorBinding tensor_binding = 1; + // The filename within the assets directory. Note: does not include the base + // path or asset directory prefix. Base paths can and will change when models + // are deployed for serving. + string filename = 2; +} + +// A Signature specifies the inputs and outputs of commonly used graphs. +message Signature { + oneof type { + RegressionSignature regression_signature = 1; + ClassificationSignature classification_signature = 2; + GenericSignature generic_signature = 3; + } +}; + +// RegressionSignature specifies a graph that takes an input and returns an +// output. +message RegressionSignature { + TensorBinding input = 1; + TensorBinding output = 2; +}; + +// ClassificationSignature specifies a graph that takes an input and returns +// classes and their scores. +// WARNING(break-tutorial-inline-code): The following code snippet is +// in-lined in tutorials, please update tutorial documents accordingly +// whenever code changes. +message ClassificationSignature { + TensorBinding input = 1; + TensorBinding classes = 2; + TensorBinding scores = 3; +}; + +// GenericSignature specifies a map from logical name to Tensor name. 
+// Typical application of GenericSignature is to use a single GenericSignature +// that includes all of the Tensor nodes and target names that may be useful at +// serving, analysis or debugging time. The recommended name for this signature +// in the ModelManifest is "generic_bindings". +message GenericSignature { + map map = 1; +}; diff --git a/executor/proto/tensorflow/contrib/tensor_forest/proto/fertile_stats.proto b/executor/proto/tensorflow/contrib/tensor_forest/proto/fertile_stats.proto new file mode 100644 index 0000000000..d568fa3081 --- /dev/null +++ b/executor/proto/tensorflow/contrib/tensor_forest/proto/fertile_stats.proto @@ -0,0 +1,99 @@ +syntax = "proto3"; +option cc_enable_arenas = true; + +package tensorflow.tensorforest; + +import "tensorflow/contrib/decision_trees/proto/generic_tree_model.proto"; + + +message FertileStats { + // Tracks stats for each node. node_to_slot[i] is the FertileSlot for node i. + // This may be sized to max_nodes initially, or grow dynamically as needed. + repeated FertileSlot node_to_slot = 1; +} + + +message GiniStats { + // This allows us to quickly track and calculate impurity (classification) + // by storing the sum of input weights and the sum of the squares of the + // input weights. Weighted gini is then: 1 - (square / sum * sum). + // Updates to these numbers are: + // old_i = leaf->value(label) + // new_i = old_i + incoming_weight + // sum -> sum + incoming_weight + // square -> square - (old_i ^ 2) + (new_i ^ 2) + // total_left_sum -> total_left_sum - old_left_i * old_total_i + + // new_left_i * new_total_i + float square = 2; +} + +message LeafStat { + // The sum of the weights of the training examples that we have seen. + // This is here, outside of the leaf_stat oneof, because almost all + // types will want it. + float weight_sum = 3; + + // TODO(thomaswc): Move the GiniStats out of LeafStats and into something + // that only tracks them for splits. 
+ message GiniImpurityClassificationStats { + oneof counts { + decision_trees.Vector dense_counts = 1; + decision_trees.SparseVector sparse_counts = 2; + } + GiniStats gini = 3; + } + + // This is the info needed for calculating variance for regression. + // Variance will still have to be summed over every output, but the + // number of outputs in regression problems is almost always 1. + message LeastSquaresRegressionStats { + decision_trees.Vector mean_output = 1; + decision_trees.Vector mean_output_squares = 2; + } + + oneof leaf_stat { + GiniImpurityClassificationStats classification = 1; + LeastSquaresRegressionStats regression = 2; + // TODO(thomaswc): Add in v5's SparseClassStats. + } +} + +message FertileSlot { + // The statistics for *all* the examples seen at this leaf. + LeafStat leaf_stats = 4; + + repeated SplitCandidate candidates = 1; + + // The statistics for the examples seen at this leaf after all the + // splits have been initialized. If post_init_leaf_stats.weight_sum + // is > 0, then all candidates have been initialized. We need to track + // both leaf_stats and post_init_leaf_stats because the first is used + // to create the decision_tree::Leaf and the second is used to infer + // the statistics for the right side of a split (given the leaf side + // stats). + LeafStat post_init_leaf_stats = 6; + + int32 node_id = 5; + int32 depth = 7; +} + +message SplitCandidate { + // proto representing the potential node. + decision_trees.BinaryNode split = 1; + + // Right counts are inferred from FertileSlot.leaf_stats and left. + LeafStat left_stats = 4; + + // Right stats (not full counts) are kept here. + LeafStat right_stats = 5; + + // Fields used when training with a graph runner. + string unique_id = 6; +} + +// Proto used for tracking tree paths during inference time. +message TreePath { + // Nodes are listed in order that they were traversed. i.e. nodes_visited[0] + // is the tree's root node. 
+ repeated decision_trees.TreeNode nodes_visited = 1; +} diff --git a/executor/proto/tensorflow/contrib/tensor_forest/proto/tensor_forest_params.proto b/executor/proto/tensorflow/contrib/tensor_forest/proto/tensor_forest_params.proto new file mode 100644 index 0000000000..4545a8a675 --- /dev/null +++ b/executor/proto/tensorflow/contrib/tensor_forest/proto/tensor_forest_params.proto @@ -0,0 +1,154 @@ +syntax = "proto3"; + +package tensorflow.tensorforest; + +import "tensorflow/contrib/decision_trees/proto/generic_tree_model.proto"; + +// Leaf models specify what is returned at inference time, and how it is +// stored in the decision_trees.Leaf protos. +enum LeafModelType { + MODEL_DENSE_CLASSIFICATION = 0; + MODEL_SPARSE_CLASSIFICATION = 1; + MODEL_REGRESSION = 2; + MODEL_SPARSE_OR_DENSE_CLASSIFICATION = 3; +} + +// Stats models generally specify information that is collected which is +// necessary to choose a split at a node. Specifically, they operate on +// a SplitCandidate::LeafStat proto. +enum StatsModelType { + STATS_DENSE_GINI = 0; + STATS_SPARSE_GINI = 1; + STATS_LEAST_SQUARES_REGRESSION = 2; + // STATS_SPARSE_THEN_DENSE_GINI is deprecated and no longer supported. + STATS_SPARSE_THEN_DENSE_GINI = 3; + STATS_FIXED_SIZE_SPARSE_GINI = 4; +} + +// Allows selection of operations on the collection of split candidates. +// Basic infers right split stats from the leaf stats and each candidate's +// left stats. +enum SplitCollectionType { + COLLECTION_BASIC = 0; + GRAPH_RUNNER_COLLECTION = 1; +} + +// Pruning strategies define how candidates are pruned over time. +// SPLIT_PRUNE_HALF prunes the worst half of splits every prune_ever_samples, +// etc. Note that prune_every_samples plays against the depth-dependent +// split_after_samples, so they should be set together. 
+enum SplitPruningStrategyType { + SPLIT_PRUNE_NONE = 0; + SPLIT_PRUNE_HALF = 1; + SPLIT_PRUNE_QUARTER = 2; + SPLIT_PRUNE_10_PERCENT = 3; + // SPLIT_PRUNE_HOEFFDING prunes splits whose Gini impurity is worst than + // the best split's by more than the Hoeffding bound. + SPLIT_PRUNE_HOEFFDING = 4; +} + +message SplitPruningConfig { + DepthDependentParam prune_every_samples = 1; + SplitPruningStrategyType type = 2; +} + +// Finish strategies define when slots are considered finished. +// Basic requires at least split_after_samples, and doesn't allow slots to +// finish until the leaf has received more than one class. Hoeffding splits +// early after min_split_samples if one split is dominating the rest according +// to hoeffding bounds. Bootstrap does the same but compares gini's calculated +// with sampled smoothed counts. +enum SplitFinishStrategyType { + SPLIT_FINISH_BASIC = 0; + SPLIT_FINISH_DOMINATE_HOEFFDING = 2; + SPLIT_FINISH_DOMINATE_BOOTSTRAP = 3; +} + +message SplitFinishConfig { + // Configure how often we check for finish, because some finish methods + // are expensive to perform. + DepthDependentParam check_every_steps = 1; + SplitFinishStrategyType type = 2; +} + +// A parameter that changes linearly with depth, with upper and lower bounds. +message LinearParam { + float slope = 1; + float y_intercept = 2; + float min_val = 3; + float max_val = 4; +} + +// A parameter that changes expoentially with the form +// f = c + mb^(k*d) +// where: +// c: constant bias +// b: base +// m: multiplier +// k: depth multiplier +// d: depth +message ExponentialParam { + float bias = 1; + float base = 2; + float multiplier = 3; + float depth_multiplier = 4; +} + +// A parameter that is 'off' until depth >= a threshold, then is 'on'. +message ThresholdParam { + float on_value = 1; + float off_value = 2; + float threshold = 3; +} + +// A parameter that may change with node depth. 
+message DepthDependentParam { + oneof ParamType { + float constant_value = 1; + LinearParam linear = 2; + ExponentialParam exponential = 3; + ThresholdParam threshold = 4; + } +} + +message TensorForestParams { + // ------------ Types that control training subsystems ------ // + LeafModelType leaf_type = 1; + StatsModelType stats_type = 2; + SplitCollectionType collection_type = 3; + SplitPruningConfig pruning_type = 4; + SplitFinishConfig finish_type = 5; + + // --------- Parameters that can't change by definition --------------- // + int32 num_trees = 6; + int32 max_nodes = 7; + int32 num_features = 21; + + decision_trees.InequalityTest.Type inequality_test_type = 19; + + // Some booleans controlling execution + bool is_regression = 8; + bool drop_final_class = 9; + bool collate_examples = 10; + bool checkpoint_stats = 11; + bool use_running_stats_method = 20; + bool initialize_average_splits = 22; + bool inference_tree_paths = 23; + + // Number of classes (classification) or targets (regression) + int32 num_outputs = 12; + + // --------- Parameters that could be depth-dependent --------------- // + DepthDependentParam num_splits_to_consider = 13; + DepthDependentParam split_after_samples = 14; + DepthDependentParam dominate_fraction = 15; + DepthDependentParam min_split_samples = 18; + + // --------- Parameters for experimental features ---------------------- // + string graph_dir = 16; + int32 num_select_features = 17; + + // When using a FixedSizeSparseClassificationGrowStats, keep track of + // this many classes. + int32 num_classes_to_track = 24; +} diff --git a/executor/proto/tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto b/executor/proto/tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto new file mode 100644 index 0000000000..a9b18df9c1 --- /dev/null +++ b/executor/proto/tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto @@ -0,0 +1,46 @@ +/* Copyright 2016 The TensorFlow Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package tensorflow; + +message SpriteMetadata { + string image_path = 1; + // [width, height] of a single image in the sprite. + repeated uint32 single_image_dim = 2; +} + +message EmbeddingInfo { + string tensor_name = 1; + string metadata_path = 2; + string bookmarks_path = 3; + // Shape of the 2D tensor [N x D]. If missing, it will be inferred from the + // model checkpoint. + repeated uint32 tensor_shape = 4; + SpriteMetadata sprite = 5; + // Path to the TSV file holding the tensor values. If missing, the tensor + // is assumed to be stored in the model checkpoint. + string tensor_path = 6; +} + +message ProjectorConfig { + // Path to the checkpoint file. Use either this or model_checkpoint_dir. + string model_checkpoint_path = 1; + repeated EmbeddingInfo embeddings = 2; + // Path to the checkpoint directory. The directory will be scanned for the + // latest checkpoint file. + string model_checkpoint_dir = 3; +} diff --git a/executor/proto/tensorflow/contrib/training/python/training/hparam.proto b/executor/proto/tensorflow/contrib/training/python/training/hparam.proto new file mode 100644 index 0000000000..67462cd9cf --- /dev/null +++ b/executor/proto/tensorflow/contrib/training/python/training/hparam.proto @@ -0,0 +1,52 @@ +// Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================= + +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; + +// Protocol buffer holding hyper parameters. +// Examples of hyper parameters: +// learning_rate = 0.1, +// num_hidden_units = 100, +// activations = ['relu', 'tanh'] +message HParamDef { + message BytesList { + repeated bytes value = 1; + } + message FloatList { + repeated float value = 1 [packed = true]; + } + message Int64List { + repeated int64 value = 1 [packed = true]; + } + message BoolList { + repeated bool value = 1 [packed = true]; + } + message HParamType { + oneof kind { + int64 int64_value = 1; + float float_value = 2; + bytes bytes_value = 3; + bool bool_value = 7; + Int64List int64_list = 4; + FloatList float_list = 5; + BytesList bytes_list = 6; + BoolList bool_list = 8; + } + }; + map hparam = 1; +} diff --git a/executor/proto/tensorflow/contrib/verbs/verbs_service.proto b/executor/proto/tensorflow/contrib/verbs/verbs_service.proto new file mode 100644 index 0000000000..abdae1d84f --- /dev/null +++ b/executor/proto/tensorflow/contrib/verbs/verbs_service.proto @@ -0,0 +1,68 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package tensorflow; +option java_outer_classname = "VerbsServiceProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.contrib.verbs"; + +//////////////////////////////////////////////////////////////////////////////// +// +// GRPC Helper messages used to exchange RDMA information. +// +//////////////////////////////////////////////////////////////////////////////// + +message Channel { + int32 lid = 1; + int32 qpn = 2; + int32 psn = 3; + uint64 snp = 4; + uint64 iid = 5; +} + +message MemoryRegion { + uint64 remote_addr = 1; + uint32 rkey = 2; +} +message GetRemoteAddressRequest { + string host_name = 1; + Channel channel = 2; + repeated MemoryRegion mr = 3; +} + +message GetRemoteAddressResponse { + string host_name = 1; + Channel channel = 2; + repeated MemoryRegion mr = 3; +} + +message ErrorStatusProto { + int32 error_code = 1; + string error_message = 2; + string error_details = 3; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// VerbsService +// +//////////////////////////////////////////////////////////////////////////////// + +service VerbsService { + rpc GetRemoteAddress(GetRemoteAddressRequest) + returns (GetRemoteAddressResponse); +} diff --git a/executor/proto/tensorflow/core/debug/debug_service.proto b/executor/proto/tensorflow/core/debug/debug_service.proto new file mode 100644 index 0000000000..4bef74dfc5 --- /dev/null +++ 
b/executor/proto/tensorflow/core/debug/debug_service.proto @@ -0,0 +1,100 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/profiler/tfprof_log.proto"; +import "tensorflow/core/protobuf/debug.proto"; +import "tensorflow/core/util/event.proto"; + +// Reply message from EventListener to the client, i.e., to the source of the +// Event protocol buffers, e.g., debug ops inserted by a debugged runtime to a +// TensorFlow graph being executed. +message EventReply { + message DebugOpStateChange { + enum State { + STATE_UNSPECIFIED = 0; + DISABLED = 1; + READ_ONLY = 2; + READ_WRITE = 3; + } + + State state = 1; + string node_name = 2; + int32 output_slot = 3; + string debug_op = 4; + } + + repeated DebugOpStateChange debug_op_state_changes = 1; + + // New tensor value to override the current tensor value with. + TensorProto tensor = 2; + // TODO(cais): Make use of this field to implement overriding of tensor value + // during debugging. +} + +// Data on the traceback of a debugged call, e.g., a Session.run() call, or the +// execution of an eager operation. +message CallTraceback { + enum CallType { + UNSPECIFIED = 0; + GRAPH_EXECUTION = 1; + EAGER_EXECUTION = 2; + } + + CallType call_type = 1; + + // A key for the call. 
For example, for graph execution, this is a key + // consisting of the names of the fed and fetched tensors. + string call_key = 2; + + // Traceback stack for the origin of the call event. + // For graph execution, this is the stack of the Session.run() call. + // For eager execution, this is the stack of the Python line that invokes + // the execution of the eager op. + tfprof.CodeDef origin_stack = 3; + + // Keeps track of the mapping from integer IDs in `origin_stack` to actual + // string values (e.g., file paths, function names). + map origin_id_to_string = 4; + + // Traceback for the graph (if any) involved in the call. + tfprof.OpLogProto graph_traceback = 5; + + // Version of the graph in `graph_traceback` (if any). + int64 graph_version = 6; +} + +// EventListener: Receives Event protos, e.g., from debugged TensorFlow +// runtime(s). +service EventListener { + // Client(s) can use this RPC method to send the EventListener Event protos. + // The Event protos can hold information such as: + // 1) intermediate tensors from a debugged graph being executed, which can + // be sent from DebugIdentity ops configured with grpc URLs. + // 2) GraphDefs of partition graphs, which can be sent from special debug + // ops that get executed immediately after the beginning of the graph + // execution. + rpc SendEvents(stream Event) returns (stream EventReply); + + // Send the tracebacks of a TensorFlow execution call. + rpc SendTracebacks(CallTraceback) returns (EventReply); + + // Send a collection of source code files being debugged. 
+ rpc SendSourceFiles(DebuggedSourceFiles) returns (EventReply); +} diff --git a/executor/proto/tensorflow/core/debug/debugger_event_metadata.proto b/executor/proto/tensorflow/core/debug/debugger_event_metadata.proto new file mode 100644 index 0000000000..8bdedb1a50 --- /dev/null +++ b/executor/proto/tensorflow/core/debug/debugger_event_metadata.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package third_party.tensorflow.core.debug; + +// Encapsulates per-event data related to debugging. +message DebuggerEventMetadata { + string device = 1; + int32 output_slot = 2; + int32 num_chunks = 3; + int32 chunk_index = 4; +}; diff --git a/executor/proto/tensorflow/core/example/example.pb.go b/executor/proto/tensorflow/core/example/example.pb.go new file mode 100644 index 0000000000..fea74f8152 --- /dev/null +++ b/executor/proto/tensorflow/core/example/example.pb.go @@ -0,0 +1,134 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/example/example.proto + +package example + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Example struct { + Features *Features `protobuf:"bytes,1,opt,name=features,proto3" json:"features,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Example) Reset() { *m = Example{} } +func (m *Example) String() string { return proto.CompactTextString(m) } +func (*Example) ProtoMessage() {} +func (*Example) Descriptor() ([]byte, []int) { + return fileDescriptor_ded3cec6177e03bb, []int{0} +} + +func (m *Example) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Example.Unmarshal(m, b) +} +func (m *Example) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Example.Marshal(b, m, deterministic) +} +func (m *Example) XXX_Merge(src proto.Message) { + xxx_messageInfo_Example.Merge(m, src) +} +func (m *Example) XXX_Size() int { + return xxx_messageInfo_Example.Size(m) +} +func (m *Example) XXX_DiscardUnknown() { + xxx_messageInfo_Example.DiscardUnknown(m) +} + +var xxx_messageInfo_Example proto.InternalMessageInfo + +func (m *Example) GetFeatures() *Features { + if m != nil { + return m.Features + } + return nil +} + +type SequenceExample struct { + Context *Features `protobuf:"bytes,1,opt,name=context,proto3" json:"context,omitempty"` + FeatureLists *FeatureLists `protobuf:"bytes,2,opt,name=feature_lists,json=featureLists,proto3" json:"feature_lists,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SequenceExample) Reset() { *m = SequenceExample{} } +func (m *SequenceExample) String() string { return proto.CompactTextString(m) } +func (*SequenceExample) ProtoMessage() {} +func (*SequenceExample) Descriptor() ([]byte, []int) { + return fileDescriptor_ded3cec6177e03bb, []int{1} +} + +func (m *SequenceExample) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_SequenceExample.Unmarshal(m, b) +} +func (m *SequenceExample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SequenceExample.Marshal(b, m, deterministic) +} +func (m *SequenceExample) XXX_Merge(src proto.Message) { + xxx_messageInfo_SequenceExample.Merge(m, src) +} +func (m *SequenceExample) XXX_Size() int { + return xxx_messageInfo_SequenceExample.Size(m) +} +func (m *SequenceExample) XXX_DiscardUnknown() { + xxx_messageInfo_SequenceExample.DiscardUnknown(m) +} + +var xxx_messageInfo_SequenceExample proto.InternalMessageInfo + +func (m *SequenceExample) GetContext() *Features { + if m != nil { + return m.Context + } + return nil +} + +func (m *SequenceExample) GetFeatureLists() *FeatureLists { + if m != nil { + return m.FeatureLists + } + return nil +} + +func init() { + proto.RegisterType((*Example)(nil), "tensorflow.Example") + proto.RegisterType((*SequenceExample)(nil), "tensorflow.SequenceExample") +} + +func init() { + proto.RegisterFile("tensorflow/core/example/example.proto", fileDescriptor_ded3cec6177e03bb) +} + +var fileDescriptor_ded3cec6177e03bb = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x2d, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0xd7, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4f, 0xad, 0x48, 0xcc, + 0x2d, 0xc8, 0x81, 0xd3, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x5c, 0x08, 0x65, 0x52, 0x38, + 0xb5, 0xa4, 0xa5, 0x26, 0x96, 0x94, 0x16, 0x41, 0xb5, 0x28, 0x59, 0x73, 0xb1, 0xbb, 0x42, 0x24, + 0x84, 0x0c, 0xb8, 0x38, 0xa0, 0x72, 0xc5, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, 0x22, 0x7a, + 0x08, 0x43, 0xf4, 0xdc, 0xa0, 0x72, 0x41, 0x70, 0x55, 0x4a, 0x0d, 0x8c, 0x5c, 0xfc, 0xc1, 0xa9, + 0x85, 0xa5, 0xa9, 0x79, 0xc9, 0xa9, 0x30, 0x53, 0xf4, 0xb8, 0xd8, 0x93, 0xf3, 0xf3, 0x4a, 0x52, + 0x2b, 0x4a, 0xf0, 0x1a, 0x02, 0x53, 0x24, 0x64, 0xcb, 0xc5, 0x0b, 0x35, 0x2f, 0x3e, 0x27, 0xb3, + 0xb8, 0xa4, 0x58, 0x82, 
0x09, 0xac, 0x4b, 0x02, 0x8b, 0x2e, 0x1f, 0x90, 0x7c, 0x10, 0x4f, 0x1a, + 0x12, 0xcf, 0x29, 0x93, 0x4b, 0x2c, 0xbf, 0x28, 0x1d, 0x59, 0x31, 0xd4, 0x9f, 0x4e, 0xbc, 0x50, + 0x17, 0x05, 0x80, 0xfc, 0x59, 0x1c, 0xc0, 0x18, 0x65, 0x9d, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, + 0x97, 0x9c, 0x9f, 0xab, 0x8f, 0x14, 0x38, 0xd8, 0x99, 0xe9, 0xf9, 0x28, 0xa1, 0xf6, 0x83, 0x91, + 0x31, 0x89, 0x0d, 0x1c, 0x62, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x64, 0x0a, 0xaa, 0x81, + 0x8d, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/example/example.proto b/executor/proto/tensorflow/core/example/example.proto new file mode 100644 index 0000000000..e36e51d8d5 --- /dev/null +++ b/executor/proto/tensorflow/core/example/example.proto @@ -0,0 +1,301 @@ +// Protocol messages for describing input data Examples for machine learning +// model training or inference. +syntax = "proto3"; + +import "tensorflow/core/example/feature.proto"; +option cc_enable_arenas = true; +option java_outer_classname = "ExampleProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.example"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/example"; +package tensorflow; + +// An Example is a mostly-normalized data format for storing data for +// training and inference. It contains a key-value store (features); where +// each key (string) maps to a Feature message (which is oneof packed BytesList, +// FloatList, or Int64List). This flexible and compact format allows the +// storage of large amounts of typed data, but requires that the data shape +// and use be determined by the configuration files and parsers that are used to +// read and write this format. That is, the Example is mostly *not* a +// self-describing format. In TensorFlow, Examples are read in row-major +// format, so any configuration that describes data with rank-2 or above +// should keep this in mind. 
For example, to store an M x N matrix of Bytes, +// the BytesList must contain M*N bytes, with M rows of N contiguous values +// each. That is, the BytesList value must store the matrix as: +// .... row 0 .... .... row 1 .... // ........... // ... row M-1 .... +// +// An Example for a movie recommendation application: +// features { +// feature { +// key: "age" +// value { float_list { +// value: 29.0 +// }} +// } +// feature { +// key: "movie" +// value { bytes_list { +// value: "The Shawshank Redemption" +// value: "Fight Club" +// }} +// } +// feature { +// key: "movie_ratings" +// value { float_list { +// value: 9.0 +// value: 9.7 +// }} +// } +// feature { +// key: "suggestion" +// value { bytes_list { +// value: "Inception" +// }} +// } +// # Note that this feature exists to be used as a label in training. +// # E.g., if training a logistic regression model to predict purchase +// # probability in our learning tool we would set the label feature to +// # "suggestion_purchased". +// feature { +// key: "suggestion_purchased" +// value { float_list { +// value: 1.0 +// }} +// } +// # Similar to "suggestion_purchased" above this feature exists to be used +// # as a label in training. +// # E.g., if training a linear regression model to predict purchase +// # price in our learning tool we would set the label feature to +// # "purchase_price". +// feature { +// key: "purchase_price" +// value { float_list { +// value: 9.99 +// }} +// } +// } +// +// A conformant Example data set obeys the following conventions: +// - If a Feature K exists in one example with data type T, it must be of +// type T in all other examples when present. It may be omitted. +// - The number of instances of Feature K list data may vary across examples, +// depending on the requirements of the model. +// - If a Feature K doesn't exist in an example, a K-specific default will be +// used, if configured. 
+// - If a Feature K exists in an example but contains no items, the intent +// is considered to be an empty tensor and no default will be used. + +message Example { + Features features = 1; +}; + +// A SequenceExample is an Example representing one or more sequences, and +// some context. The context contains features which apply to the entire +// example. The feature_lists contain a key, value map where each key is +// associated with a repeated set of Features (a FeatureList). +// A FeatureList thus represents the values of a feature identified by its key +// over time / frames. +// +// Below is a SequenceExample for a movie recommendation application recording a +// sequence of ratings by a user. The time-independent features ("locale", +// "age", "favorites") describing the user are part of the context. The sequence +// of movies the user rated are part of the feature_lists. For each movie in the +// sequence we have information on its name and actors and the user's rating. +// This information is recorded in three separate feature_list(s). +// In the example below there are only two movies. All three feature_list(s), +// namely "movie_ratings", "movie_names", and "actors" have a feature value for +// both movies. Note, that "actors" is itself a bytes_list with multiple +// strings per movie. 
+// +// context: { +// feature: { +// key : "locale" +// value: { +// bytes_list: { +// value: [ "pt_BR" ] +// } +// } +// } +// feature: { +// key : "age" +// value: { +// float_list: { +// value: [ 19.0 ] +// } +// } +// } +// feature: { +// key : "favorites" +// value: { +// bytes_list: { +// value: [ "Majesty Rose", "Savannah Outen", "One Direction" ] +// } +// } +// } +// } +// feature_lists: { +// feature_list: { +// key : "movie_ratings" +// value: { +// feature: { +// float_list: { +// value: [ 4.5 ] +// } +// } +// feature: { +// float_list: { +// value: [ 5.0 ] +// } +// } +// } +// } +// feature_list: { +// key : "movie_names" +// value: { +// feature: { +// bytes_list: { +// value: [ "The Shawshank Redemption" ] +// } +// } +// feature: { +// bytes_list: { +// value: [ "Fight Club" ] +// } +// } +// } +// } +// feature_list: { +// key : "actors" +// value: { +// feature: { +// bytes_list: { +// value: [ "Tim Robbins", "Morgan Freeman" ] +// } +// } +// feature: { +// bytes_list: { +// value: [ "Brad Pitt", "Edward Norton", "Helena Bonham Carter" ] +// } +// } +// } +// } +// } +// +// A conformant SequenceExample data set obeys the following conventions: +// +// Context: +// - All conformant context features K must obey the same conventions as +// a conformant Example's features (see above). +// Feature lists: +// - A FeatureList L may be missing in an example; it is up to the +// parser configuration to determine if this is allowed or considered +// an empty list (zero length). +// - If a FeatureList L exists, it may be empty (zero length). +// - If a FeatureList L is non-empty, all features within the FeatureList +// must have the same data type T. Even across SequenceExamples, the type T +// of the FeatureList identified by the same key must be the same. An entry +// without any values may serve as an empty feature. 
+// - If a FeatureList L is non-empty, it is up to the parser configuration +// to determine if all features within the FeatureList must +// have the same size. The same holds for this FeatureList across multiple +// examples. +// - For sequence modeling, e.g.: +// http://colah.github.io/posts/2015-08-Understanding-LSTMs/ +// https://github.com/tensorflow/nmt +// the feature lists represent a sequence of frames. +// In this scenario, all FeatureLists in a SequenceExample have the same +// number of Feature messages, so that the ith element in each FeatureList +// is part of the ith frame (or time step). +// Examples of conformant and non-conformant examples' FeatureLists: +// +// Conformant FeatureLists: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// +// Non-conformant FeatureLists (mismatched types): +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { int64_list: { value: [ 5 ] } } } +// } } +// +// Conditionally conformant FeatureLists, the parser configuration determines +// if the feature sizes must match: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0, 6.0 ] } } } +// } } +// +// Conformant pair of SequenceExample +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// and: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } +// feature: { float_list: { value: [ 2.0 ] } } } +// } } +// +// Conformant pair of SequenceExample +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { 
feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// and: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { } +// } } +// +// Conditionally conformant pair of SequenceExample, the parser configuration +// determines if the second feature_lists is consistent (zero-length) or +// invalid (missing "movie_ratings"): +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// and: +// feature_lists: { } +// +// Non-conformant pair of SequenceExample (mismatched types) +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// and: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { int64_list: { value: [ 4 ] } } +// feature: { int64_list: { value: [ 5 ] } } +// feature: { int64_list: { value: [ 2 ] } } } +// } } +// +// Conditionally conformant pair of SequenceExample; the parser configuration +// determines if the feature sizes must match: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// and: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.0 ] } } +// feature: { float_list: { value: [ 5.0, 3.0 ] } } +// } } + +message SequenceExample { + Features context = 1; + FeatureLists feature_lists = 2; +}; diff --git a/executor/proto/tensorflow/core/example/example_parser_configuration.pb.go b/executor/proto/tensorflow/core/example/example_parser_configuration.pb.go new file mode 100644 index 0000000000..cc0096d330 --- /dev/null +++ b/executor/proto/tensorflow/core/example/example_parser_configuration.pb.go @@ -0,0 +1,316 @@ +// Code 
generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/example/example_parser_configuration.proto + +package example + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type VarLenFeatureProto struct { + Dtype framework.DataType `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + ValuesOutputTensorName string `protobuf:"bytes,2,opt,name=values_output_tensor_name,json=valuesOutputTensorName,proto3" json:"values_output_tensor_name,omitempty"` + IndicesOutputTensorName string `protobuf:"bytes,3,opt,name=indices_output_tensor_name,json=indicesOutputTensorName,proto3" json:"indices_output_tensor_name,omitempty"` + ShapesOutputTensorName string `protobuf:"bytes,4,opt,name=shapes_output_tensor_name,json=shapesOutputTensorName,proto3" json:"shapes_output_tensor_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VarLenFeatureProto) Reset() { *m = VarLenFeatureProto{} } +func (m *VarLenFeatureProto) String() string { return proto.CompactTextString(m) } +func (*VarLenFeatureProto) ProtoMessage() {} +func (*VarLenFeatureProto) Descriptor() ([]byte, []int) { + return fileDescriptor_80ac05576ebb1f2e, []int{0} +} + +func (m *VarLenFeatureProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VarLenFeatureProto.Unmarshal(m, b) +} +func (m *VarLenFeatureProto) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VarLenFeatureProto.Marshal(b, m, deterministic) +} +func (m *VarLenFeatureProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_VarLenFeatureProto.Merge(m, src) +} +func (m *VarLenFeatureProto) XXX_Size() int { + return xxx_messageInfo_VarLenFeatureProto.Size(m) +} +func (m *VarLenFeatureProto) XXX_DiscardUnknown() { + xxx_messageInfo_VarLenFeatureProto.DiscardUnknown(m) +} + +var xxx_messageInfo_VarLenFeatureProto proto.InternalMessageInfo + +func (m *VarLenFeatureProto) GetDtype() framework.DataType { + if m != nil { + return m.Dtype + } + return framework.DataType_DT_INVALID +} + +func (m *VarLenFeatureProto) GetValuesOutputTensorName() string { + if m != nil { + return m.ValuesOutputTensorName + } + return "" +} + +func (m *VarLenFeatureProto) GetIndicesOutputTensorName() string { + if m != nil { + return m.IndicesOutputTensorName + } + return "" +} + +func (m *VarLenFeatureProto) GetShapesOutputTensorName() string { + if m != nil { + return m.ShapesOutputTensorName + } + return "" +} + +type FixedLenFeatureProto struct { + Dtype framework.DataType `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + Shape *framework.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"` + DefaultValue *framework.TensorProto `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + ValuesOutputTensorName string `protobuf:"bytes,4,opt,name=values_output_tensor_name,json=valuesOutputTensorName,proto3" json:"values_output_tensor_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FixedLenFeatureProto) Reset() { *m = FixedLenFeatureProto{} } +func (m *FixedLenFeatureProto) String() string { return proto.CompactTextString(m) } +func (*FixedLenFeatureProto) ProtoMessage() {} +func 
(*FixedLenFeatureProto) Descriptor() ([]byte, []int) { + return fileDescriptor_80ac05576ebb1f2e, []int{1} +} + +func (m *FixedLenFeatureProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FixedLenFeatureProto.Unmarshal(m, b) +} +func (m *FixedLenFeatureProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FixedLenFeatureProto.Marshal(b, m, deterministic) +} +func (m *FixedLenFeatureProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FixedLenFeatureProto.Merge(m, src) +} +func (m *FixedLenFeatureProto) XXX_Size() int { + return xxx_messageInfo_FixedLenFeatureProto.Size(m) +} +func (m *FixedLenFeatureProto) XXX_DiscardUnknown() { + xxx_messageInfo_FixedLenFeatureProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FixedLenFeatureProto proto.InternalMessageInfo + +func (m *FixedLenFeatureProto) GetDtype() framework.DataType { + if m != nil { + return m.Dtype + } + return framework.DataType_DT_INVALID +} + +func (m *FixedLenFeatureProto) GetShape() *framework.TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +func (m *FixedLenFeatureProto) GetDefaultValue() *framework.TensorProto { + if m != nil { + return m.DefaultValue + } + return nil +} + +func (m *FixedLenFeatureProto) GetValuesOutputTensorName() string { + if m != nil { + return m.ValuesOutputTensorName + } + return "" +} + +type FeatureConfiguration struct { + // Types that are valid to be assigned to Config: + // *FeatureConfiguration_FixedLenFeature + // *FeatureConfiguration_VarLenFeature + Config isFeatureConfiguration_Config `protobuf_oneof:"config"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FeatureConfiguration) Reset() { *m = FeatureConfiguration{} } +func (m *FeatureConfiguration) String() string { return proto.CompactTextString(m) } +func (*FeatureConfiguration) ProtoMessage() {} +func (*FeatureConfiguration) Descriptor() ([]byte, []int) { + 
return fileDescriptor_80ac05576ebb1f2e, []int{2} +} + +func (m *FeatureConfiguration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FeatureConfiguration.Unmarshal(m, b) +} +func (m *FeatureConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FeatureConfiguration.Marshal(b, m, deterministic) +} +func (m *FeatureConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_FeatureConfiguration.Merge(m, src) +} +func (m *FeatureConfiguration) XXX_Size() int { + return xxx_messageInfo_FeatureConfiguration.Size(m) +} +func (m *FeatureConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_FeatureConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_FeatureConfiguration proto.InternalMessageInfo + +type isFeatureConfiguration_Config interface { + isFeatureConfiguration_Config() +} + +type FeatureConfiguration_FixedLenFeature struct { + FixedLenFeature *FixedLenFeatureProto `protobuf:"bytes,1,opt,name=fixed_len_feature,json=fixedLenFeature,proto3,oneof"` +} + +type FeatureConfiguration_VarLenFeature struct { + VarLenFeature *VarLenFeatureProto `protobuf:"bytes,2,opt,name=var_len_feature,json=varLenFeature,proto3,oneof"` +} + +func (*FeatureConfiguration_FixedLenFeature) isFeatureConfiguration_Config() {} + +func (*FeatureConfiguration_VarLenFeature) isFeatureConfiguration_Config() {} + +func (m *FeatureConfiguration) GetConfig() isFeatureConfiguration_Config { + if m != nil { + return m.Config + } + return nil +} + +func (m *FeatureConfiguration) GetFixedLenFeature() *FixedLenFeatureProto { + if x, ok := m.GetConfig().(*FeatureConfiguration_FixedLenFeature); ok { + return x.FixedLenFeature + } + return nil +} + +func (m *FeatureConfiguration) GetVarLenFeature() *VarLenFeatureProto { + if x, ok := m.GetConfig().(*FeatureConfiguration_VarLenFeature); ok { + return x.VarLenFeature + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*FeatureConfiguration) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*FeatureConfiguration_FixedLenFeature)(nil), + (*FeatureConfiguration_VarLenFeature)(nil), + } +} + +type ExampleParserConfiguration struct { + FeatureMap map[string]*FeatureConfiguration `protobuf:"bytes,1,rep,name=feature_map,json=featureMap,proto3" json:"feature_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExampleParserConfiguration) Reset() { *m = ExampleParserConfiguration{} } +func (m *ExampleParserConfiguration) String() string { return proto.CompactTextString(m) } +func (*ExampleParserConfiguration) ProtoMessage() {} +func (*ExampleParserConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_80ac05576ebb1f2e, []int{3} +} + +func (m *ExampleParserConfiguration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExampleParserConfiguration.Unmarshal(m, b) +} +func (m *ExampleParserConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExampleParserConfiguration.Marshal(b, m, deterministic) +} +func (m *ExampleParserConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExampleParserConfiguration.Merge(m, src) +} +func (m *ExampleParserConfiguration) XXX_Size() int { + return xxx_messageInfo_ExampleParserConfiguration.Size(m) +} +func (m *ExampleParserConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ExampleParserConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ExampleParserConfiguration proto.InternalMessageInfo + +func (m *ExampleParserConfiguration) GetFeatureMap() map[string]*FeatureConfiguration { + if m != nil { + return m.FeatureMap + } + return nil +} + +func init() { + proto.RegisterType((*VarLenFeatureProto)(nil), "tensorflow.VarLenFeatureProto") + 
proto.RegisterType((*FixedLenFeatureProto)(nil), "tensorflow.FixedLenFeatureProto") + proto.RegisterType((*FeatureConfiguration)(nil), "tensorflow.FeatureConfiguration") + proto.RegisterType((*ExampleParserConfiguration)(nil), "tensorflow.ExampleParserConfiguration") + proto.RegisterMapType((map[string]*FeatureConfiguration)(nil), "tensorflow.ExampleParserConfiguration.FeatureMapEntry") +} + +func init() { + proto.RegisterFile("tensorflow/core/example/example_parser_configuration.proto", fileDescriptor_80ac05576ebb1f2e) +} + +var fileDescriptor_80ac05576ebb1f2e = []byte{ + // 515 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xcf, 0x6a, 0xdb, 0x40, + 0x10, 0xc6, 0xb3, 0x76, 0x1c, 0x9a, 0x71, 0x53, 0xb7, 0x22, 0x24, 0x8e, 0x28, 0x45, 0x18, 0x5a, + 0x4c, 0x29, 0x32, 0xa8, 0x10, 0x9a, 0xa4, 0x27, 0xb7, 0x09, 0x3e, 0xb4, 0xa9, 0x51, 0x43, 0x0a, + 0xbd, 0x2c, 0x1b, 0x7b, 0xe4, 0x88, 0x48, 0xda, 0x65, 0xb5, 0x72, 0x62, 0xe8, 0x93, 0xf5, 0x41, + 0xfa, 0x1a, 0x3d, 0xf4, 0xd2, 0x63, 0xd1, 0xae, 0x48, 0x64, 0x5b, 0x71, 0x0f, 0x39, 0x59, 0xec, + 0xfc, 0xbe, 0xf9, 0xf3, 0x79, 0x67, 0xe1, 0x50, 0x61, 0x92, 0x72, 0x19, 0x44, 0xfc, 0xba, 0x37, + 0xe2, 0x12, 0x7b, 0x78, 0xc3, 0x62, 0x11, 0xdd, 0xfe, 0x52, 0xc1, 0x64, 0x8a, 0x92, 0x8e, 0x78, + 0x12, 0x84, 0x93, 0x4c, 0x32, 0x15, 0xf2, 0xc4, 0x15, 0x92, 0x2b, 0x6e, 0xc1, 0x9d, 0xd6, 0x7e, + 0xb3, 0x98, 0x27, 0x90, 0x2c, 0xc6, 0x6b, 0x2e, 0xaf, 0x7a, 0x26, 0x42, 0xd3, 0x4b, 0x26, 0xd0, + 0x28, 0xed, 0x57, 0xff, 0xa3, 0x0b, 0xee, 0xe5, 0x0a, 0x6e, 0x26, 0x30, 0x35, 0x58, 0xe7, 0x0f, + 0x01, 0xeb, 0x9c, 0xc9, 0x4f, 0x98, 0x9c, 0x20, 0x53, 0x99, 0xc4, 0xa1, 0xee, 0xef, 0x35, 0x34, + 0xc6, 0x39, 0xd6, 0x26, 0x0e, 0xe9, 0x3e, 0xf1, 0xb6, 0xdd, 0xbb, 0x6c, 0xee, 0x47, 0xa6, 0xd8, + 0xd9, 0x4c, 0xa0, 0x6f, 0x10, 0xeb, 0x00, 0xf6, 0xa6, 0x2c, 0xca, 0x30, 0xa5, 0x3c, 0x53, 0x22, + 0x53, 0xb4, 0xe8, 0x3a, 0x61, 0x31, 0xb6, 0x6b, 0x0e, 0xe9, 0x6e, 0xfa, 0x3b, 0x06, 0xf8, 0xa2, + 
0xe3, 0x67, 0x3a, 0x7c, 0xca, 0x62, 0xb4, 0x8e, 0xc0, 0x0e, 0x93, 0x71, 0x38, 0xaa, 0xd6, 0xd6, + 0xb5, 0x76, 0xb7, 0x20, 0x96, 0xc4, 0x07, 0xb0, 0xa7, 0x8d, 0xa9, 0xd4, 0xae, 0x9b, 0xba, 0x06, + 0x58, 0x94, 0x76, 0x7e, 0x13, 0xd8, 0x3e, 0x09, 0x6f, 0x70, 0xfc, 0x90, 0xb9, 0x3d, 0x68, 0xe8, + 0xf4, 0x7a, 0xc6, 0xa6, 0xf7, 0xbc, 0xcc, 0x9a, 0x5a, 0x5f, 0xf3, 0xb0, 0x4e, 0xec, 0x1b, 0xd4, + 0x7a, 0x0f, 0x5b, 0x63, 0x0c, 0x58, 0x16, 0x29, 0xaa, 0x2d, 0xd1, 0x33, 0x36, 0xbd, 0xdd, 0x65, + 0xad, 0x91, 0x3d, 0x2e, 0xe8, 0xf3, 0x1c, 0x5e, 0xed, 0xf4, 0xfa, 0x2a, 0xa7, 0x3b, 0x3f, 0xf3, + 0x89, 0xcd, 0xa4, 0x1f, 0xca, 0xf7, 0xd1, 0x3a, 0x85, 0x67, 0x41, 0xee, 0x04, 0x8d, 0x30, 0xa1, + 0x81, 0x21, 0xf4, 0xf4, 0x4d, 0xcf, 0x29, 0x77, 0x55, 0x65, 0xd7, 0x60, 0xcd, 0x6f, 0x05, 0xf3, + 0xe7, 0xd6, 0x00, 0x5a, 0x53, 0x26, 0xe7, 0xb2, 0x19, 0x7f, 0x5e, 0x94, 0xb3, 0x2d, 0x5f, 0xb9, + 0xc1, 0x9a, 0xbf, 0x35, 0x2d, 0x9f, 0xf6, 0x1f, 0xc1, 0x86, 0x59, 0x9d, 0xce, 0x2f, 0x02, 0xf6, + 0xb1, 0x59, 0xaa, 0xa1, 0xde, 0xa9, 0xf9, 0x11, 0xbe, 0x41, 0xb3, 0x28, 0x45, 0x63, 0x26, 0xda, + 0xc4, 0xa9, 0x77, 0x9b, 0xde, 0x7e, 0xb9, 0xdc, 0xfd, 0x62, 0xb7, 0xa8, 0xf6, 0x99, 0x89, 0xe3, + 0x44, 0xc9, 0x99, 0x0f, 0xc1, 0xed, 0x81, 0x4d, 0xa1, 0xb5, 0x10, 0xb6, 0x9e, 0x42, 0xfd, 0x0a, + 0x67, 0xda, 0xa0, 0x4d, 0x3f, 0xff, 0xb4, 0xf6, 0xa1, 0x61, 0xfe, 0xca, 0x5a, 0x85, 0x69, 0x15, + 0x8e, 0xfb, 0x06, 0x3f, 0xac, 0xbd, 0x23, 0xfd, 0x1f, 0xb0, 0xc3, 0xe5, 0xa4, 0xac, 0x28, 0xde, + 0x8e, 0xbe, 0x73, 0x7f, 0xcb, 0xda, 0xae, 0x74, 0x48, 0xbe, 0x1f, 0x4d, 0x42, 0x75, 0x99, 0x5d, + 0xb8, 0x23, 0x1e, 0xf7, 0x4a, 0xdb, 0x5e, 0xfd, 0x39, 0xe1, 0x73, 0x8f, 0xd4, 0x5f, 0x42, 0x2e, + 0x36, 0xf4, 0x13, 0xf0, 0xf6, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x32, 0x82, 0xe2, 0xfc, 0xc9, + 0x04, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/example/example_parser_configuration.proto b/executor/proto/tensorflow/core/example/example_parser_configuration.proto new file mode 100644 index 0000000000..b2c115d80e 
--- /dev/null +++ b/executor/proto/tensorflow/core/example/example_parser_configuration.proto @@ -0,0 +1,39 @@ +// Protocol messages for describing the configuration of the ExampleParserOp. + +syntax = "proto3"; + +option cc_enable_arenas = true; +option java_outer_classname = "ExampleParserConfigurationProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.example"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/example"; +package tensorflow; + +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/framework/types.proto"; + +message VarLenFeatureProto { + tensorflow.DataType dtype = 1; + string values_output_tensor_name = 2; + string indices_output_tensor_name = 3; + string shapes_output_tensor_name = 4; +}; + +message FixedLenFeatureProto { + tensorflow.DataType dtype = 1; + tensorflow.TensorShapeProto shape = 2; + tensorflow.TensorProto default_value = 3; + string values_output_tensor_name = 4; +}; + +message FeatureConfiguration { + oneof config { + FixedLenFeatureProto fixed_len_feature = 1; + VarLenFeatureProto var_len_feature = 2; + } +}; + +message ExampleParserConfiguration { + map feature_map = 1; +}; diff --git a/executor/proto/tensorflow/core/example/feature.pb.go b/executor/proto/tensorflow/core/example/feature.pb.go new file mode 100644 index 0000000000..9040456a72 --- /dev/null +++ b/executor/proto/tensorflow/core/example/feature.pb.go @@ -0,0 +1,409 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/example/feature.proto + +package example + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Containers to hold repeated fundamental values. +type BytesList struct { + Value [][]byte `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesList) Reset() { *m = BytesList{} } +func (m *BytesList) String() string { return proto.CompactTextString(m) } +func (*BytesList) ProtoMessage() {} +func (*BytesList) Descriptor() ([]byte, []int) { + return fileDescriptor_0a52991187ca0172, []int{0} +} + +func (m *BytesList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesList.Unmarshal(m, b) +} +func (m *BytesList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesList.Marshal(b, m, deterministic) +} +func (m *BytesList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesList.Merge(m, src) +} +func (m *BytesList) XXX_Size() int { + return xxx_messageInfo_BytesList.Size(m) +} +func (m *BytesList) XXX_DiscardUnknown() { + xxx_messageInfo_BytesList.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesList proto.InternalMessageInfo + +func (m *BytesList) GetValue() [][]byte { + if m != nil { + return m.Value + } + return nil +} + +type FloatList struct { + Value []float32 `protobuf:"fixed32,1,rep,packed,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatList) Reset() { *m = FloatList{} } +func (m *FloatList) String() string { return proto.CompactTextString(m) } +func 
(*FloatList) ProtoMessage() {} +func (*FloatList) Descriptor() ([]byte, []int) { + return fileDescriptor_0a52991187ca0172, []int{1} +} + +func (m *FloatList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatList.Unmarshal(m, b) +} +func (m *FloatList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatList.Marshal(b, m, deterministic) +} +func (m *FloatList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatList.Merge(m, src) +} +func (m *FloatList) XXX_Size() int { + return xxx_messageInfo_FloatList.Size(m) +} +func (m *FloatList) XXX_DiscardUnknown() { + xxx_messageInfo_FloatList.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatList proto.InternalMessageInfo + +func (m *FloatList) GetValue() []float32 { + if m != nil { + return m.Value + } + return nil +} + +type Int64List struct { + Value []int64 `protobuf:"varint,1,rep,packed,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64List) Reset() { *m = Int64List{} } +func (m *Int64List) String() string { return proto.CompactTextString(m) } +func (*Int64List) ProtoMessage() {} +func (*Int64List) Descriptor() ([]byte, []int) { + return fileDescriptor_0a52991187ca0172, []int{2} +} + +func (m *Int64List) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64List.Unmarshal(m, b) +} +func (m *Int64List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64List.Marshal(b, m, deterministic) +} +func (m *Int64List) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64List.Merge(m, src) +} +func (m *Int64List) XXX_Size() int { + return xxx_messageInfo_Int64List.Size(m) +} +func (m *Int64List) XXX_DiscardUnknown() { + xxx_messageInfo_Int64List.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64List proto.InternalMessageInfo + +func (m *Int64List) GetValue() []int64 { + if m != nil { + return m.Value + } + 
return nil +} + +// Containers for non-sequential data. +type Feature struct { + // Each feature can be exactly one kind. + // + // Types that are valid to be assigned to Kind: + // *Feature_BytesList + // *Feature_FloatList + // *Feature_Int64List + Kind isFeature_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Feature) Reset() { *m = Feature{} } +func (m *Feature) String() string { return proto.CompactTextString(m) } +func (*Feature) ProtoMessage() {} +func (*Feature) Descriptor() ([]byte, []int) { + return fileDescriptor_0a52991187ca0172, []int{3} +} + +func (m *Feature) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Feature.Unmarshal(m, b) +} +func (m *Feature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Feature.Marshal(b, m, deterministic) +} +func (m *Feature) XXX_Merge(src proto.Message) { + xxx_messageInfo_Feature.Merge(m, src) +} +func (m *Feature) XXX_Size() int { + return xxx_messageInfo_Feature.Size(m) +} +func (m *Feature) XXX_DiscardUnknown() { + xxx_messageInfo_Feature.DiscardUnknown(m) +} + +var xxx_messageInfo_Feature proto.InternalMessageInfo + +type isFeature_Kind interface { + isFeature_Kind() +} + +type Feature_BytesList struct { + BytesList *BytesList `protobuf:"bytes,1,opt,name=bytes_list,json=bytesList,proto3,oneof"` +} + +type Feature_FloatList struct { + FloatList *FloatList `protobuf:"bytes,2,opt,name=float_list,json=floatList,proto3,oneof"` +} + +type Feature_Int64List struct { + Int64List *Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,proto3,oneof"` +} + +func (*Feature_BytesList) isFeature_Kind() {} + +func (*Feature_FloatList) isFeature_Kind() {} + +func (*Feature_Int64List) isFeature_Kind() {} + +func (m *Feature) GetKind() isFeature_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Feature) GetBytesList() *BytesList { + if x, ok := 
m.GetKind().(*Feature_BytesList); ok { + return x.BytesList + } + return nil +} + +func (m *Feature) GetFloatList() *FloatList { + if x, ok := m.GetKind().(*Feature_FloatList); ok { + return x.FloatList + } + return nil +} + +func (m *Feature) GetInt64List() *Int64List { + if x, ok := m.GetKind().(*Feature_Int64List); ok { + return x.Int64List + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Feature) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Feature_BytesList)(nil), + (*Feature_FloatList)(nil), + (*Feature_Int64List)(nil), + } +} + +type Features struct { + // Map from feature name to feature. + Feature map[string]*Feature `protobuf:"bytes,1,rep,name=feature,proto3" json:"feature,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Features) Reset() { *m = Features{} } +func (m *Features) String() string { return proto.CompactTextString(m) } +func (*Features) ProtoMessage() {} +func (*Features) Descriptor() ([]byte, []int) { + return fileDescriptor_0a52991187ca0172, []int{4} +} + +func (m *Features) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Features.Unmarshal(m, b) +} +func (m *Features) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Features.Marshal(b, m, deterministic) +} +func (m *Features) XXX_Merge(src proto.Message) { + xxx_messageInfo_Features.Merge(m, src) +} +func (m *Features) XXX_Size() int { + return xxx_messageInfo_Features.Size(m) +} +func (m *Features) XXX_DiscardUnknown() { + xxx_messageInfo_Features.DiscardUnknown(m) +} + +var xxx_messageInfo_Features proto.InternalMessageInfo + +func (m *Features) GetFeature() map[string]*Feature { + if m != nil { + return m.Feature + } + return nil +} + +// Containers for sequential data. 
+// +// A FeatureList contains lists of Features. These may hold zero or more +// Feature values. +// +// FeatureLists are organized into categories by name. The FeatureLists message +// contains the mapping from name to FeatureList. +// +type FeatureList struct { + Feature []*Feature `protobuf:"bytes,1,rep,name=feature,proto3" json:"feature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FeatureList) Reset() { *m = FeatureList{} } +func (m *FeatureList) String() string { return proto.CompactTextString(m) } +func (*FeatureList) ProtoMessage() {} +func (*FeatureList) Descriptor() ([]byte, []int) { + return fileDescriptor_0a52991187ca0172, []int{5} +} + +func (m *FeatureList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FeatureList.Unmarshal(m, b) +} +func (m *FeatureList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FeatureList.Marshal(b, m, deterministic) +} +func (m *FeatureList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FeatureList.Merge(m, src) +} +func (m *FeatureList) XXX_Size() int { + return xxx_messageInfo_FeatureList.Size(m) +} +func (m *FeatureList) XXX_DiscardUnknown() { + xxx_messageInfo_FeatureList.DiscardUnknown(m) +} + +var xxx_messageInfo_FeatureList proto.InternalMessageInfo + +func (m *FeatureList) GetFeature() []*Feature { + if m != nil { + return m.Feature + } + return nil +} + +type FeatureLists struct { + // Map from feature name to feature list. 
+ FeatureList map[string]*FeatureList `protobuf:"bytes,1,rep,name=feature_list,json=featureList,proto3" json:"feature_list,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FeatureLists) Reset() { *m = FeatureLists{} } +func (m *FeatureLists) String() string { return proto.CompactTextString(m) } +func (*FeatureLists) ProtoMessage() {} +func (*FeatureLists) Descriptor() ([]byte, []int) { + return fileDescriptor_0a52991187ca0172, []int{6} +} + +func (m *FeatureLists) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FeatureLists.Unmarshal(m, b) +} +func (m *FeatureLists) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FeatureLists.Marshal(b, m, deterministic) +} +func (m *FeatureLists) XXX_Merge(src proto.Message) { + xxx_messageInfo_FeatureLists.Merge(m, src) +} +func (m *FeatureLists) XXX_Size() int { + return xxx_messageInfo_FeatureLists.Size(m) +} +func (m *FeatureLists) XXX_DiscardUnknown() { + xxx_messageInfo_FeatureLists.DiscardUnknown(m) +} + +var xxx_messageInfo_FeatureLists proto.InternalMessageInfo + +func (m *FeatureLists) GetFeatureList() map[string]*FeatureList { + if m != nil { + return m.FeatureList + } + return nil +} + +func init() { + proto.RegisterType((*BytesList)(nil), "tensorflow.BytesList") + proto.RegisterType((*FloatList)(nil), "tensorflow.FloatList") + proto.RegisterType((*Int64List)(nil), "tensorflow.Int64List") + proto.RegisterType((*Feature)(nil), "tensorflow.Feature") + proto.RegisterType((*Features)(nil), "tensorflow.Features") + proto.RegisterMapType((map[string]*Feature)(nil), "tensorflow.Features.FeatureEntry") + proto.RegisterType((*FeatureList)(nil), "tensorflow.FeatureList") + proto.RegisterType((*FeatureLists)(nil), "tensorflow.FeatureLists") + proto.RegisterMapType((map[string]*FeatureList)(nil), 
"tensorflow.FeatureLists.FeatureListEntry") +} + +func init() { + proto.RegisterFile("tensorflow/core/example/feature.proto", fileDescriptor_0a52991187ca0172) +} + +var fileDescriptor_0a52991187ca0172 = []byte{ + // 394 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0xcf, 0x4e, 0xf2, 0x40, + 0x14, 0xc5, 0xbf, 0x69, 0x3f, 0xc1, 0xde, 0x62, 0x42, 0xea, 0x3f, 0xc2, 0x0a, 0x9a, 0x90, 0xc0, + 0x82, 0x36, 0x41, 0xd3, 0x18, 0x71, 0xd5, 0x44, 0xa2, 0x09, 0x89, 0xa4, 0x1b, 0x13, 0x37, 0xa6, + 0xc5, 0x29, 0x36, 0x94, 0x0e, 0x69, 0x07, 0x95, 0x37, 0xf1, 0x45, 0x5c, 0xf8, 0x66, 0x2e, 0xcd, + 0xb4, 0xd3, 0x32, 0x40, 0xdd, 0xcd, 0x74, 0xce, 0xb9, 0xf3, 0x3b, 0xb7, 0x73, 0xa1, 0x43, 0x71, + 0x94, 0x90, 0xd8, 0x0f, 0xc9, 0xbb, 0x39, 0x25, 0x31, 0x36, 0xf1, 0x87, 0xbb, 0x58, 0x86, 0xd8, + 0xf4, 0xb1, 0x4b, 0x57, 0x31, 0x36, 0x96, 0x31, 0xa1, 0x44, 0x83, 0x8d, 0x4c, 0x6f, 0x83, 0x62, + 0xaf, 0x29, 0x4e, 0xc6, 0x41, 0x42, 0xb5, 0x13, 0x38, 0x78, 0x73, 0xc3, 0x15, 0x6e, 0xa0, 0x96, + 0xdc, 0xad, 0x39, 0xd9, 0x46, 0xef, 0x80, 0x32, 0x0a, 0x89, 0x4b, 0x53, 0x49, 0x43, 0x94, 0x48, + 0xb6, 0x54, 0x47, 0x82, 0xec, 0x3e, 0xa2, 0xd6, 0xe5, 0xbe, 0x4c, 0x16, 0x65, 0xdf, 0x08, 0xaa, + 0xa3, 0x0c, 0x47, 0xb3, 0x00, 0x3c, 0x76, 0xf9, 0x73, 0x18, 0x24, 0xb4, 0x81, 0x5a, 0xa8, 0xab, + 0x0e, 0x4e, 0x8d, 0x0d, 0x9d, 0x51, 0xa0, 0xdd, 0xfd, 0x73, 0x14, 0xaf, 0xe0, 0xb4, 0x00, 0x7c, + 0x46, 0x94, 0xf9, 0xa4, 0x7d, 0x5f, 0xc1, 0xcb, 0x7c, 0x7e, 0x01, 0x6f, 0x01, 0x04, 0x0c, 0x31, + 0xf3, 0xc9, 0xfb, 0xbe, 0x22, 0x00, 0xf3, 0x05, 0xf9, 0xc6, 0xae, 0xc0, 0xff, 0x79, 0x10, 0xbd, + 0xe8, 0x9f, 0x08, 0x0e, 0x39, 0x7b, 0xa2, 0x0d, 0xa1, 0xca, 0xdb, 0x9a, 0x86, 0x54, 0x07, 0xed, + 0x2d, 0x02, 0x2e, 0xcb, 0x17, 0xb7, 0x11, 0x8d, 0xd7, 0x4e, 0xee, 0x68, 0x3e, 0x40, 0x4d, 0x3c, + 0xd0, 0xea, 0x20, 0xcf, 0xf1, 0x3a, 0x6d, 0x81, 0xe2, 0xb0, 0xa5, 0xd6, 0xcb, 0x3b, 0x98, 0xc5, + 0x3b, 0x2e, 0x29, 0xce, 0x5b, 0x7a, 0x2d, 0x5d, 0x21, 0xfd, 0x06, 0x54, 
0xfe, 0x35, 0x4d, 0xda, + 0xdf, 0x85, 0x2b, 0xf5, 0xe7, 0x1a, 0xfd, 0x0b, 0x15, 0x3c, 0xcc, 0x9e, 0x68, 0x63, 0xa8, 0xf1, + 0xb3, 0xfc, 0xdf, 0xb0, 0x22, 0xbd, 0x92, 0x22, 0xa9, 0x5e, 0xdc, 0x64, 0x49, 0x55, 0x7f, 0xf3, + 0xa5, 0xf9, 0x08, 0xf5, 0x5d, 0x41, 0x49, 0xe2, 0xfe, 0x76, 0xe2, 0xf3, 0x3f, 0x2e, 0x13, 0x52, + 0xdb, 0x01, 0x9c, 0x91, 0x78, 0x26, 0x0a, 0xf9, 0x8b, 0xb7, 0x8f, 0xb8, 0x63, 0xc2, 0x5e, 0x7c, + 0x32, 0x41, 0x4f, 0xc3, 0x59, 0x40, 0x5f, 0x57, 0x9e, 0x31, 0x25, 0x0b, 0x53, 0x18, 0x93, 0xf2, + 0xe5, 0x8c, 0x6c, 0xcd, 0xcf, 0x0f, 0x42, 0x5e, 0x25, 0x9d, 0x9d, 0x8b, 0xdf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xb8, 0x21, 0xa2, 0xe5, 0x64, 0x03, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/example/feature.proto b/executor/proto/tensorflow/core/example/feature.proto new file mode 100644 index 0000000000..6d81974aac --- /dev/null +++ b/executor/proto/tensorflow/core/example/feature.proto @@ -0,0 +1,105 @@ +// Protocol messages for describing features for machine learning model +// training or inference. +// +// There are three base Feature types: +// - bytes +// - float +// - int64 +// +// A Feature contains Lists which may hold zero or more values. These +// lists are the base values BytesList, FloatList, Int64List. +// +// Features are organized into categories by name. The Features message +// contains the mapping from name to Feature. 
+// +// Example Features for a movie recommendation application: +// feature { +// key: "age" +// value { float_list { +// value: 29.0 +// }} +// } +// feature { +// key: "movie" +// value { bytes_list { +// value: "The Shawshank Redemption" +// value: "Fight Club" +// }} +// } +// feature { +// key: "movie_ratings" +// value { float_list { +// value: 9.0 +// value: 9.7 +// }} +// } +// feature { +// key: "suggestion" +// value { bytes_list { +// value: "Inception" +// }} +// } +// feature { +// key: "suggestion_purchased" +// value { int64_list { +// value: 1 +// }} +// } +// feature { +// key: "purchase_price" +// value { float_list { +// value: 9.99 +// }} +// } +// + +syntax = "proto3"; +option cc_enable_arenas = true; +option java_outer_classname = "FeatureProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.example"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/example"; +package tensorflow; + +// Containers to hold repeated fundamental values. +message BytesList { + repeated bytes value = 1; +} +message FloatList { + repeated float value = 1 [packed = true]; +} +message Int64List { + repeated int64 value = 1 [packed = true]; +} + +// Containers for non-sequential data. +message Feature { + // Each feature can be exactly one kind. + oneof kind { + BytesList bytes_list = 1; + FloatList float_list = 2; + Int64List int64_list = 3; + } +}; + +message Features { + // Map from feature name to feature. + map feature = 1; +}; + +// Containers for sequential data. +// +// A FeatureList contains lists of Features. These may hold zero or more +// Feature values. +// +// FeatureLists are organized into categories by name. The FeatureLists message +// contains the mapping from name to FeatureList. +// +message FeatureList { + repeated Feature feature = 1; +}; + +message FeatureLists { + // Map from feature name to feature list. 
+ map feature_list = 1; +}; diff --git a/executor/proto/tensorflow/core/framework/allocation_description.pb.go b/executor/proto/tensorflow/core/framework/allocation_description.pb.go new file mode 100644 index 0000000000..60ecf064b5 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/allocation_description.pb.go @@ -0,0 +1,136 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/allocation_description.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type AllocationDescription struct { + // Total number of bytes requested + RequestedBytes int64 `protobuf:"varint,1,opt,name=requested_bytes,json=requestedBytes,proto3" json:"requested_bytes,omitempty"` + // Total number of bytes allocated if known + AllocatedBytes int64 `protobuf:"varint,2,opt,name=allocated_bytes,json=allocatedBytes,proto3" json:"allocated_bytes,omitempty"` + // Name of the allocator used + AllocatorName string `protobuf:"bytes,3,opt,name=allocator_name,json=allocatorName,proto3" json:"allocator_name,omitempty"` + // Identifier of the allocated buffer if known + AllocationId int64 `protobuf:"varint,4,opt,name=allocation_id,json=allocationId,proto3" json:"allocation_id,omitempty"` + // Set if this tensor only has one remaining reference + HasSingleReference bool `protobuf:"varint,5,opt,name=has_single_reference,json=hasSingleReference,proto3" json:"has_single_reference,omitempty"` + // Address of the allocation. 
+ Ptr uint64 `protobuf:"varint,6,opt,name=ptr,proto3" json:"ptr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocationDescription) Reset() { *m = AllocationDescription{} } +func (m *AllocationDescription) String() string { return proto.CompactTextString(m) } +func (*AllocationDescription) ProtoMessage() {} +func (*AllocationDescription) Descriptor() ([]byte, []int) { + return fileDescriptor_1254702e9f0c7d2f, []int{0} +} + +func (m *AllocationDescription) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocationDescription.Unmarshal(m, b) +} +func (m *AllocationDescription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocationDescription.Marshal(b, m, deterministic) +} +func (m *AllocationDescription) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocationDescription.Merge(m, src) +} +func (m *AllocationDescription) XXX_Size() int { + return xxx_messageInfo_AllocationDescription.Size(m) +} +func (m *AllocationDescription) XXX_DiscardUnknown() { + xxx_messageInfo_AllocationDescription.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocationDescription proto.InternalMessageInfo + +func (m *AllocationDescription) GetRequestedBytes() int64 { + if m != nil { + return m.RequestedBytes + } + return 0 +} + +func (m *AllocationDescription) GetAllocatedBytes() int64 { + if m != nil { + return m.AllocatedBytes + } + return 0 +} + +func (m *AllocationDescription) GetAllocatorName() string { + if m != nil { + return m.AllocatorName + } + return "" +} + +func (m *AllocationDescription) GetAllocationId() int64 { + if m != nil { + return m.AllocationId + } + return 0 +} + +func (m *AllocationDescription) GetHasSingleReference() bool { + if m != nil { + return m.HasSingleReference + } + return false +} + +func (m *AllocationDescription) GetPtr() uint64 { + if m != nil { + return m.Ptr + } + return 0 +} + +func init() { + 
proto.RegisterType((*AllocationDescription)(nil), "tensorflow.AllocationDescription") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/allocation_description.proto", fileDescriptor_1254702e9f0c7d2f) +} + +var fileDescriptor_1254702e9f0c7d2f = []byte{ + // 279 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x4d, 0x4b, 0xc3, 0x40, + 0x10, 0x86, 0x59, 0x5b, 0x8b, 0x2e, 0x56, 0x65, 0x51, 0x58, 0xf0, 0x12, 0x14, 0x31, 0xa7, 0x44, + 0x10, 0xbc, 0x79, 0x30, 0x78, 0xf1, 0x22, 0x25, 0xde, 0xbc, 0x84, 0x4d, 0x32, 0xf9, 0xc0, 0x24, + 0x13, 0x67, 0xb7, 0x14, 0xf1, 0x8f, 0xeb, 0x4d, 0x36, 0xb5, 0x9b, 0x22, 0xbd, 0x0d, 0xcf, 0x3c, + 0xfb, 0xc1, 0xfb, 0xf2, 0x7b, 0x03, 0x9d, 0x46, 0x2a, 0x1a, 0x5c, 0x85, 0x19, 0x12, 0x84, 0x05, + 0xa9, 0x16, 0x56, 0x48, 0xef, 0xa1, 0x6a, 0x1a, 0xcc, 0x94, 0xa9, 0xb1, 0x4b, 0x72, 0xd0, 0x19, + 0xd5, 0xbd, 0x9d, 0x83, 0x9e, 0xd0, 0xa0, 0xe0, 0xe3, 0xb9, 0xcb, 0x1f, 0xc6, 0xcf, 0x1f, 0x9d, + 0xfc, 0x34, 0xba, 0xe2, 0x86, 0x9f, 0x10, 0x7c, 0x2c, 0x41, 0x1b, 0xc8, 0x93, 0xf4, 0xd3, 0x80, + 0x96, 0xcc, 0x63, 0xfe, 0x24, 0x3e, 0x76, 0x38, 0xb2, 0xd4, 0x8a, 0x7f, 0xcf, 0x39, 0x71, 0x6f, + 0x2d, 0x3a, 0xbc, 0x16, 0xaf, 0xf9, 0x86, 0x20, 0x25, 0x9d, 0x6a, 0x41, 0x4e, 0x3c, 0xe6, 0x1f, + 0xc6, 0x73, 0x47, 0x5f, 0x54, 0x0b, 0xe2, 0x8a, 0xcf, 0xb7, 0xbe, 0x5f, 0xe7, 0x72, 0x3a, 0xdc, + 0x76, 0x34, 0xc2, 0xe7, 0x5c, 0xdc, 0xf2, 0xb3, 0x4a, 0xe9, 0x44, 0xd7, 0x5d, 0xd9, 0x40, 0x42, + 0x50, 0x00, 0x41, 0x97, 0x81, 0xdc, 0xf7, 0x98, 0x7f, 0x10, 0x8b, 0x4a, 0xe9, 0xd7, 0x61, 0x15, + 0x6f, 0x36, 0xe2, 0x94, 0x4f, 0x7a, 0x43, 0x72, 0xe6, 0x31, 0x7f, 0x1a, 0xdb, 0x31, 0xfa, 0xe2, + 0x12, 0xa9, 0x0c, 0xc6, 0x34, 0x02, 0x17, 0x60, 0x74, 0xb1, 0x33, 0x94, 0x85, 0xcd, 0x4f, 0x2f, + 0xd8, 0xdb, 0x43, 0x59, 0x9b, 0x6a, 0x99, 0x06, 0x19, 0xb6, 0xe1, 0x56, 0x0b, 0xbb, 0xc7, 0x12, + 0xff, 0xd5, 0xf3, 0xcd, 0x58, 0x3a, 0x1b, 0xba, 0xb8, 0xfb, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xca, + 0xac, 0x8d, 
0x6f, 0xc5, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/allocation_description.proto b/executor/proto/tensorflow/core/framework/allocation_description.proto new file mode 100644 index 0000000000..64133b05e1 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/allocation_description.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "AllocationDescriptionProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; + +message AllocationDescription { + // Total number of bytes requested + int64 requested_bytes = 1; + + // Total number of bytes allocated if known + int64 allocated_bytes = 2; + + // Name of the allocator used + string allocator_name = 3; + + // Identifier of the allocated buffer if known + int64 allocation_id = 4; + + // Set if this tensor only has one remaining reference + bool has_single_reference = 5; + + // Address of the allocation. + uint64 ptr = 6; +}; diff --git a/executor/proto/tensorflow/core/framework/api_def.pb.go b/executor/proto/tensorflow/core/framework/api_def.pb.go new file mode 100644 index 0000000000..90ac1d6ea2 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/api_def.pb.go @@ -0,0 +1,522 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/api_def.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ApiDef_Visibility int32 + +const ( + // Normally this is "VISIBLE" unless you are inheriting a + // different value from another ApiDef. + ApiDef_DEFAULT_VISIBILITY ApiDef_Visibility = 0 + // Publicly visible in the API. + ApiDef_VISIBLE ApiDef_Visibility = 1 + // Do not include this op in the generated API. If visibility is + // set to 'SKIP', other fields are ignored for this op. + ApiDef_SKIP ApiDef_Visibility = 2 + // Hide this op by putting it into an internal namespace (or whatever + // is appropriate in the target language). + ApiDef_HIDDEN ApiDef_Visibility = 3 +) + +var ApiDef_Visibility_name = map[int32]string{ + 0: "DEFAULT_VISIBILITY", + 1: "VISIBLE", + 2: "SKIP", + 3: "HIDDEN", +} + +var ApiDef_Visibility_value = map[string]int32{ + "DEFAULT_VISIBILITY": 0, + "VISIBLE": 1, + "SKIP": 2, + "HIDDEN": 3, +} + +func (x ApiDef_Visibility) String() string { + return proto.EnumName(ApiDef_Visibility_name, int32(x)) +} + +func (ApiDef_Visibility) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00a850add58b816a, []int{0, 0} +} + +// Used to specify and override the default API & behavior in the +// generated code for client languages, from what you would get from +// the OpDef alone. There will be a set of ApiDefs that are common +// to all client languages, and another set per client language. +// The per-client-language ApiDefs will inherit values from the +// common ApiDefs which it can either replace or modify. +// +// We separate the API definition from the OpDef so we can evolve the +// API while remaining backwards compatible when interpretting old +// graphs. Overrides go in an "api_def.pbtxt" file with a text-format +// ApiDefs message. +// +// WARNING: Be *very* careful changing the API for any existing op -- +// you can change the semantics of existing code. 
These changes may +// need to wait until a major release of TensorFlow to avoid breaking +// our compatibility promises. +type ApiDef struct { + // Name of the op (in the OpDef) to specify the API for. + GraphOpName string `protobuf:"bytes,1,opt,name=graph_op_name,json=graphOpName,proto3" json:"graph_op_name,omitempty"` + // If this op is deprecated, set deprecation message to the message + // that should be logged when this op is used. + // The message should indicate alternative op to use, if any. + DeprecationMessage string `protobuf:"bytes,12,opt,name=deprecation_message,json=deprecationMessage,proto3" json:"deprecation_message,omitempty"` + // Major version when the op will be deleted. For e.g. set this + // value to 2 if op API should be removed in TensorFlow 2.0 and + // deprecated in versions before that. + DeprecationVersion int32 `protobuf:"varint,13,opt,name=deprecation_version,json=deprecationVersion,proto3" json:"deprecation_version,omitempty"` + Visibility ApiDef_Visibility `protobuf:"varint,2,opt,name=visibility,proto3,enum=tensorflow.ApiDef_Visibility" json:"visibility,omitempty"` + Endpoint []*ApiDef_Endpoint `protobuf:"bytes,3,rep,name=endpoint,proto3" json:"endpoint,omitempty"` + InArg []*ApiDef_Arg `protobuf:"bytes,4,rep,name=in_arg,json=inArg,proto3" json:"in_arg,omitempty"` + OutArg []*ApiDef_Arg `protobuf:"bytes,5,rep,name=out_arg,json=outArg,proto3" json:"out_arg,omitempty"` + // List of original in_arg names to specify new argument order. + // Length of arg_order should be either empty to keep current order + // or match size of in_arg. + ArgOrder []string `protobuf:"bytes,11,rep,name=arg_order,json=argOrder,proto3" json:"arg_order,omitempty"` + Attr []*ApiDef_Attr `protobuf:"bytes,6,rep,name=attr,proto3" json:"attr,omitempty"` + // One-line human-readable description of what the Op does. 
+ Summary string `protobuf:"bytes,7,opt,name=summary,proto3" json:"summary,omitempty"` + // Additional, longer human-readable description of what the Op does. + Description string `protobuf:"bytes,8,opt,name=description,proto3" json:"description,omitempty"` + // Modify an existing/inherited description by adding text to the beginning + // or end. + DescriptionPrefix string `protobuf:"bytes,9,opt,name=description_prefix,json=descriptionPrefix,proto3" json:"description_prefix,omitempty"` + DescriptionSuffix string `protobuf:"bytes,10,opt,name=description_suffix,json=descriptionSuffix,proto3" json:"description_suffix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApiDef) Reset() { *m = ApiDef{} } +func (m *ApiDef) String() string { return proto.CompactTextString(m) } +func (*ApiDef) ProtoMessage() {} +func (*ApiDef) Descriptor() ([]byte, []int) { + return fileDescriptor_00a850add58b816a, []int{0} +} + +func (m *ApiDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApiDef.Unmarshal(m, b) +} +func (m *ApiDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApiDef.Marshal(b, m, deterministic) +} +func (m *ApiDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApiDef.Merge(m, src) +} +func (m *ApiDef) XXX_Size() int { + return xxx_messageInfo_ApiDef.Size(m) +} +func (m *ApiDef) XXX_DiscardUnknown() { + xxx_messageInfo_ApiDef.DiscardUnknown(m) +} + +var xxx_messageInfo_ApiDef proto.InternalMessageInfo + +func (m *ApiDef) GetGraphOpName() string { + if m != nil { + return m.GraphOpName + } + return "" +} + +func (m *ApiDef) GetDeprecationMessage() string { + if m != nil { + return m.DeprecationMessage + } + return "" +} + +func (m *ApiDef) GetDeprecationVersion() int32 { + if m != nil { + return m.DeprecationVersion + } + return 0 +} + +func (m *ApiDef) GetVisibility() ApiDef_Visibility { + if m != nil { + return m.Visibility + } + 
return ApiDef_DEFAULT_VISIBILITY +} + +func (m *ApiDef) GetEndpoint() []*ApiDef_Endpoint { + if m != nil { + return m.Endpoint + } + return nil +} + +func (m *ApiDef) GetInArg() []*ApiDef_Arg { + if m != nil { + return m.InArg + } + return nil +} + +func (m *ApiDef) GetOutArg() []*ApiDef_Arg { + if m != nil { + return m.OutArg + } + return nil +} + +func (m *ApiDef) GetArgOrder() []string { + if m != nil { + return m.ArgOrder + } + return nil +} + +func (m *ApiDef) GetAttr() []*ApiDef_Attr { + if m != nil { + return m.Attr + } + return nil +} + +func (m *ApiDef) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *ApiDef) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ApiDef) GetDescriptionPrefix() string { + if m != nil { + return m.DescriptionPrefix + } + return "" +} + +func (m *ApiDef) GetDescriptionSuffix() string { + if m != nil { + return m.DescriptionSuffix + } + return "" +} + +// If you specify any endpoint, this will replace all of the +// inherited endpoints. The first endpoint should be the +// "canonical" endpoint, and should not be deprecated (unless all +// endpoints are deprecated). +type ApiDef_Endpoint struct { + // Name should be either like "CamelCaseName" or + // "Package.CamelCaseName". Client-language-specific ApiDefs may + // use a snake_case convention instead of CamelCase. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Set if this endpoint is deprecated. If set to true, a message suggesting + // to use a non-deprecated endpoint instead will be printed. If all + // endpoints are deprecated, set deprecation_message in ApiDef instead. + Deprecated bool `protobuf:"varint,3,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + // Major version when an endpoint will be deleted. For e.g. set this + // value to 2 if endpoint should be removed in TensorFlow 2.0 and + // deprecated in versions before that. 
+ DeprecationVersion int32 `protobuf:"varint,4,opt,name=deprecation_version,json=deprecationVersion,proto3" json:"deprecation_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApiDef_Endpoint) Reset() { *m = ApiDef_Endpoint{} } +func (m *ApiDef_Endpoint) String() string { return proto.CompactTextString(m) } +func (*ApiDef_Endpoint) ProtoMessage() {} +func (*ApiDef_Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_00a850add58b816a, []int{0, 0} +} + +func (m *ApiDef_Endpoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApiDef_Endpoint.Unmarshal(m, b) +} +func (m *ApiDef_Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApiDef_Endpoint.Marshal(b, m, deterministic) +} +func (m *ApiDef_Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApiDef_Endpoint.Merge(m, src) +} +func (m *ApiDef_Endpoint) XXX_Size() int { + return xxx_messageInfo_ApiDef_Endpoint.Size(m) +} +func (m *ApiDef_Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_ApiDef_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_ApiDef_Endpoint proto.InternalMessageInfo + +func (m *ApiDef_Endpoint) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiDef_Endpoint) GetDeprecated() bool { + if m != nil { + return m.Deprecated + } + return false +} + +func (m *ApiDef_Endpoint) GetDeprecationVersion() int32 { + if m != nil { + return m.DeprecationVersion + } + return 0 +} + +type ApiDef_Arg struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Change the name used to access this arg in the API from what + // is used in the GraphDef. Note that these names in `backticks` + // will also be replaced in the summary & description fields. 
+ RenameTo string `protobuf:"bytes,2,opt,name=rename_to,json=renameTo,proto3" json:"rename_to,omitempty"` + // Note: this will replace any inherited arg doc. There is no + // current way of modifying arg descriptions (other than replacing + // them entirely) as can be done with op descriptions. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApiDef_Arg) Reset() { *m = ApiDef_Arg{} } +func (m *ApiDef_Arg) String() string { return proto.CompactTextString(m) } +func (*ApiDef_Arg) ProtoMessage() {} +func (*ApiDef_Arg) Descriptor() ([]byte, []int) { + return fileDescriptor_00a850add58b816a, []int{0, 1} +} + +func (m *ApiDef_Arg) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApiDef_Arg.Unmarshal(m, b) +} +func (m *ApiDef_Arg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApiDef_Arg.Marshal(b, m, deterministic) +} +func (m *ApiDef_Arg) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApiDef_Arg.Merge(m, src) +} +func (m *ApiDef_Arg) XXX_Size() int { + return xxx_messageInfo_ApiDef_Arg.Size(m) +} +func (m *ApiDef_Arg) XXX_DiscardUnknown() { + xxx_messageInfo_ApiDef_Arg.DiscardUnknown(m) +} + +var xxx_messageInfo_ApiDef_Arg proto.InternalMessageInfo + +func (m *ApiDef_Arg) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiDef_Arg) GetRenameTo() string { + if m != nil { + return m.RenameTo + } + return "" +} + +func (m *ApiDef_Arg) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Description of the graph-construction-time configuration of this +// Op. That is to say, this describes the attr fields that will +// be specified in the NodeDef. 
+type ApiDef_Attr struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Change the name used to access this attr in the API from what + // is used in the GraphDef. Note that these names in `backticks` + // will also be replaced in the summary & description fields. + RenameTo string `protobuf:"bytes,2,opt,name=rename_to,json=renameTo,proto3" json:"rename_to,omitempty"` + // Specify a new default value to use for this attr. This default + // will be used when creating new graphs, as opposed to the + // default in the OpDef, which will be used when interpreting old + // GraphDefs. + DefaultValue *AttrValue `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Note: this will replace any inherited attr doc, there is no current + // way of modifying attr descriptions as can be done with op descriptions. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApiDef_Attr) Reset() { *m = ApiDef_Attr{} } +func (m *ApiDef_Attr) String() string { return proto.CompactTextString(m) } +func (*ApiDef_Attr) ProtoMessage() {} +func (*ApiDef_Attr) Descriptor() ([]byte, []int) { + return fileDescriptor_00a850add58b816a, []int{0, 2} +} + +func (m *ApiDef_Attr) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApiDef_Attr.Unmarshal(m, b) +} +func (m *ApiDef_Attr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApiDef_Attr.Marshal(b, m, deterministic) +} +func (m *ApiDef_Attr) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApiDef_Attr.Merge(m, src) +} +func (m *ApiDef_Attr) XXX_Size() int { + return xxx_messageInfo_ApiDef_Attr.Size(m) +} +func (m *ApiDef_Attr) XXX_DiscardUnknown() { + xxx_messageInfo_ApiDef_Attr.DiscardUnknown(m) +} + +var xxx_messageInfo_ApiDef_Attr 
proto.InternalMessageInfo + +func (m *ApiDef_Attr) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiDef_Attr) GetRenameTo() string { + if m != nil { + return m.RenameTo + } + return "" +} + +func (m *ApiDef_Attr) GetDefaultValue() *AttrValue { + if m != nil { + return m.DefaultValue + } + return nil +} + +func (m *ApiDef_Attr) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type ApiDefs struct { + Op []*ApiDef `protobuf:"bytes,1,rep,name=op,proto3" json:"op,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApiDefs) Reset() { *m = ApiDefs{} } +func (m *ApiDefs) String() string { return proto.CompactTextString(m) } +func (*ApiDefs) ProtoMessage() {} +func (*ApiDefs) Descriptor() ([]byte, []int) { + return fileDescriptor_00a850add58b816a, []int{1} +} + +func (m *ApiDefs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApiDefs.Unmarshal(m, b) +} +func (m *ApiDefs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApiDefs.Marshal(b, m, deterministic) +} +func (m *ApiDefs) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApiDefs.Merge(m, src) +} +func (m *ApiDefs) XXX_Size() int { + return xxx_messageInfo_ApiDefs.Size(m) +} +func (m *ApiDefs) XXX_DiscardUnknown() { + xxx_messageInfo_ApiDefs.DiscardUnknown(m) +} + +var xxx_messageInfo_ApiDefs proto.InternalMessageInfo + +func (m *ApiDefs) GetOp() []*ApiDef { + if m != nil { + return m.Op + } + return nil +} + +func init() { + proto.RegisterEnum("tensorflow.ApiDef_Visibility", ApiDef_Visibility_name, ApiDef_Visibility_value) + proto.RegisterType((*ApiDef)(nil), "tensorflow.ApiDef") + proto.RegisterType((*ApiDef_Endpoint)(nil), "tensorflow.ApiDef.Endpoint") + proto.RegisterType((*ApiDef_Arg)(nil), "tensorflow.ApiDef.Arg") + proto.RegisterType((*ApiDef_Attr)(nil), "tensorflow.ApiDef.Attr") + 
proto.RegisterType((*ApiDefs)(nil), "tensorflow.ApiDefs") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/api_def.proto", fileDescriptor_00a850add58b816a) +} + +var fileDescriptor_00a850add58b816a = []byte{ + // 606 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xd1, 0x6e, 0xd3, 0x30, + 0x14, 0x25, 0x4d, 0xd6, 0xa6, 0xb7, 0x1b, 0x2a, 0x46, 0x0c, 0xab, 0x13, 0x28, 0xea, 0x0b, 0x11, + 0x68, 0x8d, 0x34, 0x1e, 0x90, 0x90, 0xf6, 0xd0, 0xa9, 0x05, 0x22, 0xc6, 0x56, 0x65, 0xa3, 0x02, + 0x5e, 0x22, 0xaf, 0x71, 0x32, 0x8b, 0x26, 0xb6, 0x1c, 0x67, 0x63, 0x1f, 0xc2, 0xaf, 0x22, 0x1e, + 0x51, 0x9c, 0x6d, 0xcd, 0xda, 0x6e, 0x12, 0x6f, 0xbe, 0x3e, 0xe7, 0xdc, 0xba, 0xe7, 0x9e, 0x5c, + 0x78, 0xa5, 0x68, 0x96, 0x73, 0x19, 0xcf, 0xf9, 0xa5, 0x37, 0xe3, 0x92, 0x7a, 0xb1, 0x24, 0x29, + 0xbd, 0xe4, 0xf2, 0xa7, 0x47, 0x04, 0x0b, 0x23, 0x1a, 0x0f, 0x84, 0xe4, 0x8a, 0x23, 0x58, 0x10, + 0x7b, 0xaf, 0x1f, 0x10, 0x29, 0x25, 0xc3, 0x0b, 0x32, 0x2f, 0x68, 0xa5, 0xeb, 0xff, 0x69, 0x41, + 0x73, 0x28, 0xd8, 0x88, 0xc6, 0xa8, 0x0f, 0x5b, 0x89, 0x24, 0xe2, 0x3c, 0xe4, 0x22, 0xcc, 0x48, + 0x4a, 0xb1, 0xe1, 0x18, 0x6e, 0x3b, 0xe8, 0xe8, 0xcb, 0x63, 0x71, 0x44, 0x52, 0x8a, 0x3c, 0x78, + 0x1a, 0x51, 0x21, 0xe9, 0x8c, 0x28, 0xc6, 0xb3, 0x30, 0xa5, 0x79, 0x4e, 0x12, 0x8a, 0x37, 0x35, + 0x13, 0xd5, 0xa0, 0x2f, 0x15, 0xb2, 0x2c, 0xb8, 0xa0, 0x32, 0x67, 0x3c, 0xc3, 0x5b, 0x8e, 0xe1, + 0x6e, 0xdc, 0x11, 0x4c, 0x2b, 0x04, 0xed, 0x03, 0x5c, 0xb0, 0x9c, 0x9d, 0xb1, 0x39, 0x53, 0x57, + 0xb8, 0xe1, 0x18, 0xee, 0xe3, 0xbd, 0x17, 0x83, 0xc5, 0x3f, 0x1a, 0x54, 0xaf, 0x1d, 0x4c, 0x6f, + 0x49, 0x41, 0x4d, 0x80, 0xde, 0x81, 0x4d, 0xb3, 0x48, 0x70, 0x96, 0x29, 0x6c, 0x3a, 0xa6, 0xdb, + 0xd9, 0xdb, 0x59, 0x23, 0x1e, 0x5f, 0x53, 0x82, 0x5b, 0x32, 0xda, 0x85, 0x26, 0xcb, 0x42, 0x22, + 0x13, 0x6c, 0x69, 0xd9, 0xf6, 0x1a, 0xd9, 0x50, 0x26, 0xc1, 0x06, 0xcb, 0x86, 0x32, 0x41, 0x1e, + 0xb4, 0x78, 0xa1, 0x34, 0x7f, 0xe3, 0x41, 0x7e, 0x93, 
0x17, 0xaa, 0x14, 0xec, 0x40, 0x9b, 0xc8, + 0x24, 0xe4, 0x32, 0xa2, 0x12, 0x77, 0x1c, 0xd3, 0x6d, 0x07, 0x36, 0x91, 0xc9, 0x71, 0x59, 0xa3, + 0x37, 0x60, 0x95, 0x93, 0xc1, 0x4d, 0xdd, 0xea, 0xf9, 0xba, 0x56, 0x4a, 0xc9, 0x40, 0x93, 0x10, + 0x86, 0x56, 0x5e, 0xa4, 0x29, 0x91, 0x57, 0xb8, 0xa5, 0x7d, 0xbf, 0x29, 0x91, 0x03, 0x9d, 0x88, + 0xe6, 0x33, 0xc9, 0x44, 0xe9, 0x28, 0xb6, 0xab, 0xf9, 0xd5, 0xae, 0xd0, 0x2e, 0xa0, 0x5a, 0x19, + 0x0a, 0x49, 0x63, 0xf6, 0x0b, 0xb7, 0x35, 0xf1, 0x49, 0x0d, 0x99, 0x68, 0x60, 0x99, 0x9e, 0x17, + 0x71, 0x49, 0x87, 0x15, 0xfa, 0x89, 0x06, 0x7a, 0x1c, 0xec, 0x1b, 0x67, 0x11, 0x02, 0xab, 0x16, + 0x22, 0x7d, 0x46, 0x2f, 0x01, 0x6e, 0x26, 0x4e, 0x23, 0x6c, 0x3a, 0x86, 0x6b, 0x07, 0xb5, 0x9b, + 0xfb, 0xc2, 0x62, 0xdd, 0x17, 0x96, 0xde, 0x37, 0x30, 0x4b, 0x6f, 0xd7, 0xfd, 0xd6, 0x0e, 0xb4, + 0x25, 0x2d, 0x4f, 0xa1, 0xe2, 0x3a, 0x46, 0xed, 0xc0, 0xae, 0x2e, 0x4e, 0xf9, 0xb2, 0x51, 0xe6, + 0x8a, 0x51, 0xbd, 0xdf, 0x06, 0x58, 0xa5, 0xe7, 0xff, 0xdf, 0xfb, 0x3d, 0x6c, 0x45, 0x34, 0x26, + 0xc5, 0x5c, 0x55, 0x1f, 0x9a, 0xee, 0xde, 0xd9, 0x7b, 0x76, 0x67, 0xa8, 0x4a, 0xc9, 0x69, 0x09, + 0x06, 0x9b, 0xd7, 0x5c, 0x5d, 0x2d, 0xbf, 0xcb, 0x5a, 0x79, 0x57, 0xff, 0x23, 0xc0, 0x22, 0xf9, + 0x68, 0x1b, 0xd0, 0x68, 0xfc, 0x61, 0xf8, 0xf5, 0xf0, 0x34, 0x9c, 0xfa, 0x27, 0xfe, 0x81, 0x7f, + 0xe8, 0x9f, 0x7e, 0xef, 0x3e, 0x42, 0x1d, 0x68, 0xe9, 0xfa, 0x70, 0xdc, 0x35, 0x90, 0x0d, 0xd6, + 0xc9, 0x67, 0x7f, 0xd2, 0x6d, 0x20, 0x80, 0xe6, 0x27, 0x7f, 0x34, 0x1a, 0x1f, 0x75, 0xcd, 0xfe, + 0x2e, 0xb4, 0xaa, 0x68, 0xe5, 0xa8, 0x0f, 0x0d, 0x2e, 0xb0, 0xa1, 0xb3, 0x87, 0x56, 0xb3, 0x17, + 0x34, 0xb8, 0x38, 0x98, 0x03, 0xe6, 0x32, 0xa9, 0x83, 0xb7, 0x4b, 0xe5, 0x60, 0xb3, 0xe2, 0x4d, + 0xca, 0x85, 0x92, 0x4f, 0x8c, 0x1f, 0xfb, 0x09, 0x53, 0xe7, 0xc5, 0xd9, 0x60, 0xc6, 0x53, 0xaf, + 0xb6, 0x8a, 0xd6, 0x1f, 0x13, 0xbe, 0xb4, 0xa3, 0xfe, 0x1a, 0xc6, 0x59, 0x53, 0x2f, 0xa7, 0xb7, + 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0xcd, 0x05, 0x2e, 0xff, 0x04, 0x00, 0x00, +} diff 
--git a/executor/proto/tensorflow/core/framework/api_def.proto b/executor/proto/tensorflow/core/framework/api_def.proto new file mode 100644 index 0000000000..b0f852170b --- /dev/null +++ b/executor/proto/tensorflow/core/framework/api_def.proto @@ -0,0 +1,136 @@ +// Defines the text format for including per-op API definition and +// overrides for client language op code generators. + +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "ApiDefProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/attr_value.proto"; + +// Used to specify and override the default API & behavior in the +// generated code for client languages, from what you would get from +// the OpDef alone. There will be a set of ApiDefs that are common +// to all client languages, and another set per client language. +// The per-client-language ApiDefs will inherit values from the +// common ApiDefs which it can either replace or modify. +// +// We separate the API definition from the OpDef so we can evolve the +// API while remaining backwards compatible when interpretting old +// graphs. Overrides go in an "api_def.pbtxt" file with a text-format +// ApiDefs message. +// +// WARNING: Be *very* careful changing the API for any existing op -- +// you can change the semantics of existing code. These changes may +// need to wait until a major release of TensorFlow to avoid breaking +// our compatibility promises. +message ApiDef { + // Name of the op (in the OpDef) to specify the API for. + string graph_op_name = 1; + // If this op is deprecated, set deprecation message to the message + // that should be logged when this op is used. + // The message should indicate alternative op to use, if any. + string deprecation_message = 12; + // Major version when the op will be deleted. For e.g. 
set this + // value to 2 if op API should be removed in TensorFlow 2.0 and + // deprecated in versions before that. + int32 deprecation_version = 13; + + enum Visibility { + // Normally this is "VISIBLE" unless you are inheriting a + // different value from another ApiDef. + DEFAULT_VISIBILITY = 0; + // Publicly visible in the API. + VISIBLE = 1; + // Do not include this op in the generated API. If visibility is + // set to 'SKIP', other fields are ignored for this op. + SKIP = 2; + // Hide this op by putting it into an internal namespace (or whatever + // is appropriate in the target language). + HIDDEN = 3; + } + Visibility visibility = 2; + + // If you specify any endpoint, this will replace all of the + // inherited endpoints. The first endpoint should be the + // "canonical" endpoint, and should not be deprecated (unless all + // endpoints are deprecated). + message Endpoint { + // Name should be either like "CamelCaseName" or + // "Package.CamelCaseName". Client-language-specific ApiDefs may + // use a snake_case convention instead of CamelCase. + string name = 1; + + // Set if this endpoint is deprecated. If set to true, a message suggesting + // to use a non-deprecated endpoint instead will be printed. If all + // endpoints are deprecated, set deprecation_message in ApiDef instead. + bool deprecated = 3; + + // Major version when an endpoint will be deleted. For e.g. set this + // value to 2 if endpoint should be removed in TensorFlow 2.0 and + // deprecated in versions before that. + int32 deprecation_version = 4; + } + repeated Endpoint endpoint = 3; + + message Arg { + string name = 1; + + // Change the name used to access this arg in the API from what + // is used in the GraphDef. Note that these names in `backticks` + // will also be replaced in the summary & description fields. + string rename_to = 2; + + // Note: this will replace any inherited arg doc. 
There is no + // current way of modifying arg descriptions (other than replacing + // them entirely) as can be done with op descriptions. + string description = 3; + } + repeated Arg in_arg = 4; + repeated Arg out_arg = 5; + // List of original in_arg names to specify new argument order. + // Length of arg_order should be either empty to keep current order + // or match size of in_arg. + repeated string arg_order = 11; + + // Description of the graph-construction-time configuration of this + // Op. That is to say, this describes the attr fields that will + // be specified in the NodeDef. + message Attr { + string name = 1; + + // Change the name used to access this attr in the API from what + // is used in the GraphDef. Note that these names in `backticks` + // will also be replaced in the summary & description fields. + string rename_to = 2; + + // Specify a new default value to use for this attr. This default + // will be used when creating new graphs, as opposed to the + // default in the OpDef, which will be used when interpreting old + // GraphDefs. + AttrValue default_value = 3; + + // Note: this will replace any inherited attr doc, there is no current + // way of modifying attr descriptions as can be done with op descriptions. + string description = 4; + } + repeated Attr attr = 6; + + // One-line human-readable description of what the Op does. + string summary = 7; + + // Additional, longer human-readable description of what the Op does. + string description = 8; + + // Modify an existing/inherited description by adding text to the beginning + // or end. 
+ string description_prefix = 9; + string description_suffix = 10; +} + +message ApiDefs { + repeated ApiDef op = 1; +} diff --git a/executor/proto/tensorflow/core/framework/attr_value.pb.go b/executor/proto/tensorflow/core/framework/attr_value.pb.go new file mode 100644 index 0000000000..711023329b --- /dev/null +++ b/executor/proto/tensorflow/core/framework/attr_value.pb.go @@ -0,0 +1,417 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/attr_value.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Protocol buffer representing the value for an attr used to configure an Op. +// Comment indicates the corresponding attr type. Only the field matching the +// attr type may be filled. 
+type AttrValue struct { + // Types that are valid to be assigned to Value: + // *AttrValue_S + // *AttrValue_I + // *AttrValue_F + // *AttrValue_B + // *AttrValue_Type + // *AttrValue_Shape + // *AttrValue_Tensor + // *AttrValue_List + // *AttrValue_Func + // *AttrValue_Placeholder + Value isAttrValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttrValue) Reset() { *m = AttrValue{} } +func (m *AttrValue) String() string { return proto.CompactTextString(m) } +func (*AttrValue) ProtoMessage() {} +func (*AttrValue) Descriptor() ([]byte, []int) { + return fileDescriptor_06e758bf81984406, []int{0} +} + +func (m *AttrValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttrValue.Unmarshal(m, b) +} +func (m *AttrValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttrValue.Marshal(b, m, deterministic) +} +func (m *AttrValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttrValue.Merge(m, src) +} +func (m *AttrValue) XXX_Size() int { + return xxx_messageInfo_AttrValue.Size(m) +} +func (m *AttrValue) XXX_DiscardUnknown() { + xxx_messageInfo_AttrValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AttrValue proto.InternalMessageInfo + +type isAttrValue_Value interface { + isAttrValue_Value() +} + +type AttrValue_S struct { + S []byte `protobuf:"bytes,2,opt,name=s,proto3,oneof"` +} + +type AttrValue_I struct { + I int64 `protobuf:"varint,3,opt,name=i,proto3,oneof"` +} + +type AttrValue_F struct { + F float32 `protobuf:"fixed32,4,opt,name=f,proto3,oneof"` +} + +type AttrValue_B struct { + B bool `protobuf:"varint,5,opt,name=b,proto3,oneof"` +} + +type AttrValue_Type struct { + Type DataType `protobuf:"varint,6,opt,name=type,proto3,enum=tensorflow.DataType,oneof"` +} + +type AttrValue_Shape struct { + Shape *TensorShapeProto `protobuf:"bytes,7,opt,name=shape,proto3,oneof"` +} + +type AttrValue_Tensor struct { + 
Tensor *TensorProto `protobuf:"bytes,8,opt,name=tensor,proto3,oneof"` +} + +type AttrValue_List struct { + List *AttrValue_ListValue `protobuf:"bytes,1,opt,name=list,proto3,oneof"` +} + +type AttrValue_Func struct { + Func *NameAttrList `protobuf:"bytes,10,opt,name=func,proto3,oneof"` +} + +type AttrValue_Placeholder struct { + Placeholder string `protobuf:"bytes,9,opt,name=placeholder,proto3,oneof"` +} + +func (*AttrValue_S) isAttrValue_Value() {} + +func (*AttrValue_I) isAttrValue_Value() {} + +func (*AttrValue_F) isAttrValue_Value() {} + +func (*AttrValue_B) isAttrValue_Value() {} + +func (*AttrValue_Type) isAttrValue_Value() {} + +func (*AttrValue_Shape) isAttrValue_Value() {} + +func (*AttrValue_Tensor) isAttrValue_Value() {} + +func (*AttrValue_List) isAttrValue_Value() {} + +func (*AttrValue_Func) isAttrValue_Value() {} + +func (*AttrValue_Placeholder) isAttrValue_Value() {} + +func (m *AttrValue) GetValue() isAttrValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AttrValue) GetS() []byte { + if x, ok := m.GetValue().(*AttrValue_S); ok { + return x.S + } + return nil +} + +func (m *AttrValue) GetI() int64 { + if x, ok := m.GetValue().(*AttrValue_I); ok { + return x.I + } + return 0 +} + +func (m *AttrValue) GetF() float32 { + if x, ok := m.GetValue().(*AttrValue_F); ok { + return x.F + } + return 0 +} + +func (m *AttrValue) GetB() bool { + if x, ok := m.GetValue().(*AttrValue_B); ok { + return x.B + } + return false +} + +func (m *AttrValue) GetType() DataType { + if x, ok := m.GetValue().(*AttrValue_Type); ok { + return x.Type + } + return DataType_DT_INVALID +} + +func (m *AttrValue) GetShape() *TensorShapeProto { + if x, ok := m.GetValue().(*AttrValue_Shape); ok { + return x.Shape + } + return nil +} + +func (m *AttrValue) GetTensor() *TensorProto { + if x, ok := m.GetValue().(*AttrValue_Tensor); ok { + return x.Tensor + } + return nil +} + +func (m *AttrValue) GetList() *AttrValue_ListValue { + if x, ok := 
m.GetValue().(*AttrValue_List); ok { + return x.List + } + return nil +} + +func (m *AttrValue) GetFunc() *NameAttrList { + if x, ok := m.GetValue().(*AttrValue_Func); ok { + return x.Func + } + return nil +} + +func (m *AttrValue) GetPlaceholder() string { + if x, ok := m.GetValue().(*AttrValue_Placeholder); ok { + return x.Placeholder + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AttrValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AttrValue_S)(nil), + (*AttrValue_I)(nil), + (*AttrValue_F)(nil), + (*AttrValue_B)(nil), + (*AttrValue_Type)(nil), + (*AttrValue_Shape)(nil), + (*AttrValue_Tensor)(nil), + (*AttrValue_List)(nil), + (*AttrValue_Func)(nil), + (*AttrValue_Placeholder)(nil), + } +} + +// LINT.IfChange +type AttrValue_ListValue struct { + S [][]byte `protobuf:"bytes,2,rep,name=s,proto3" json:"s,omitempty"` + I []int64 `protobuf:"varint,3,rep,packed,name=i,proto3" json:"i,omitempty"` + F []float32 `protobuf:"fixed32,4,rep,packed,name=f,proto3" json:"f,omitempty"` + B []bool `protobuf:"varint,5,rep,packed,name=b,proto3" json:"b,omitempty"` + Type []DataType `protobuf:"varint,6,rep,packed,name=type,proto3,enum=tensorflow.DataType" json:"type,omitempty"` + Shape []*TensorShapeProto `protobuf:"bytes,7,rep,name=shape,proto3" json:"shape,omitempty"` + Tensor []*TensorProto `protobuf:"bytes,8,rep,name=tensor,proto3" json:"tensor,omitempty"` + Func []*NameAttrList `protobuf:"bytes,9,rep,name=func,proto3" json:"func,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttrValue_ListValue) Reset() { *m = AttrValue_ListValue{} } +func (m *AttrValue_ListValue) String() string { return proto.CompactTextString(m) } +func (*AttrValue_ListValue) ProtoMessage() {} +func (*AttrValue_ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_06e758bf81984406, []int{0, 0} +} + +func (m *AttrValue_ListValue) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttrValue_ListValue.Unmarshal(m, b) +} +func (m *AttrValue_ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttrValue_ListValue.Marshal(b, m, deterministic) +} +func (m *AttrValue_ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttrValue_ListValue.Merge(m, src) +} +func (m *AttrValue_ListValue) XXX_Size() int { + return xxx_messageInfo_AttrValue_ListValue.Size(m) +} +func (m *AttrValue_ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_AttrValue_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AttrValue_ListValue proto.InternalMessageInfo + +func (m *AttrValue_ListValue) GetS() [][]byte { + if m != nil { + return m.S + } + return nil +} + +func (m *AttrValue_ListValue) GetI() []int64 { + if m != nil { + return m.I + } + return nil +} + +func (m *AttrValue_ListValue) GetF() []float32 { + if m != nil { + return m.F + } + return nil +} + +func (m *AttrValue_ListValue) GetB() []bool { + if m != nil { + return m.B + } + return nil +} + +func (m *AttrValue_ListValue) GetType() []DataType { + if m != nil { + return m.Type + } + return nil +} + +func (m *AttrValue_ListValue) GetShape() []*TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +func (m *AttrValue_ListValue) GetTensor() []*TensorProto { + if m != nil { + return m.Tensor + } + return nil +} + +func (m *AttrValue_ListValue) GetFunc() []*NameAttrList { + if m != nil { + return m.Func + } + return nil +} + +// A list of attr names and their values. The whole list is attached +// with a string name. E.g., MatMul[T=float]. 
+type NameAttrList struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Attr map[string]*AttrValue `protobuf:"bytes,2,rep,name=attr,proto3" json:"attr,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NameAttrList) Reset() { *m = NameAttrList{} } +func (m *NameAttrList) String() string { return proto.CompactTextString(m) } +func (*NameAttrList) ProtoMessage() {} +func (*NameAttrList) Descriptor() ([]byte, []int) { + return fileDescriptor_06e758bf81984406, []int{1} +} + +func (m *NameAttrList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NameAttrList.Unmarshal(m, b) +} +func (m *NameAttrList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NameAttrList.Marshal(b, m, deterministic) +} +func (m *NameAttrList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NameAttrList.Merge(m, src) +} +func (m *NameAttrList) XXX_Size() int { + return xxx_messageInfo_NameAttrList.Size(m) +} +func (m *NameAttrList) XXX_DiscardUnknown() { + xxx_messageInfo_NameAttrList.DiscardUnknown(m) +} + +var xxx_messageInfo_NameAttrList proto.InternalMessageInfo + +func (m *NameAttrList) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NameAttrList) GetAttr() map[string]*AttrValue { + if m != nil { + return m.Attr + } + return nil +} + +func init() { + proto.RegisterType((*AttrValue)(nil), "tensorflow.AttrValue") + proto.RegisterType((*AttrValue_ListValue)(nil), "tensorflow.AttrValue.ListValue") + proto.RegisterType((*NameAttrList)(nil), "tensorflow.NameAttrList") + proto.RegisterMapType((map[string]*AttrValue)(nil), "tensorflow.NameAttrList.AttrEntry") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/attr_value.proto", fileDescriptor_06e758bf81984406) +} + +var 
fileDescriptor_06e758bf81984406 = []byte{ + // 519 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdf, 0x8a, 0xd3, 0x40, + 0x14, 0xc6, 0x3b, 0x99, 0xb4, 0xdb, 0x4c, 0xcb, 0x5a, 0x06, 0xc5, 0xa1, 0x08, 0x86, 0x82, 0x32, + 0xac, 0x25, 0xc1, 0xf8, 0x07, 0x11, 0xbc, 0xb0, 0x28, 0xf4, 0x42, 0x96, 0x25, 0x2e, 0x5e, 0x78, + 0xb3, 0x24, 0x75, 0xd2, 0x86, 0x4d, 0x3b, 0x61, 0x32, 0x75, 0xe9, 0x13, 0x78, 0xeb, 0x73, 0xf8, + 0x84, 0x5e, 0xca, 0x99, 0xc9, 0xa6, 0xc1, 0xdd, 0xee, 0xde, 0x9d, 0x73, 0xe6, 0xfb, 0x26, 0x27, + 0xbf, 0x39, 0x87, 0x9c, 0x68, 0xb1, 0xa9, 0xa4, 0xca, 0x0a, 0x79, 0x15, 0x2e, 0xa4, 0x12, 0x61, + 0xa6, 0x92, 0xb5, 0xb8, 0x92, 0xea, 0x32, 0x4c, 0xb4, 0x56, 0x17, 0x3f, 0x93, 0x62, 0x2b, 0x82, + 0x52, 0x49, 0x2d, 0x29, 0xd9, 0x6b, 0xc7, 0xcf, 0x0f, 0xfb, 0xec, 0x89, 0xf5, 0x8c, 0xa7, 0xf7, + 0xe9, 0x2e, 0xaa, 0x55, 0x52, 0xd6, 0x5f, 0x18, 0x3f, 0xbb, 0x43, 0xbd, 0x2b, 0x45, 0x65, 0x65, + 0x93, 0x5f, 0x5d, 0xe2, 0x7d, 0xd4, 0x5a, 0x7d, 0x83, 0xe6, 0xe8, 0x31, 0x41, 0x15, 0x73, 0x7c, + 0xc4, 0x87, 0xf3, 0x4e, 0x8c, 0x2a, 0xc8, 0x73, 0x86, 0x7d, 0xc4, 0x31, 0xe4, 0x39, 0xe4, 0x19, + 0x73, 0x7d, 0xc4, 0x1d, 0xc8, 0x33, 0xc8, 0x53, 0xd6, 0xf5, 0x11, 0xef, 0x43, 0x9e, 0xd2, 0x13, + 0xe2, 0xc2, 0xe5, 0xac, 0xe7, 0x23, 0x7e, 0x1c, 0x3d, 0x0c, 0xf6, 0x3d, 0x04, 0x9f, 0x12, 0x9d, + 0x9c, 0xef, 0x4a, 0x31, 0xef, 0xc4, 0x46, 0x43, 0x5f, 0x93, 0xae, 0xe9, 0x97, 0x1d, 0xf9, 0x88, + 0x0f, 0xa2, 0x27, 0x6d, 0xf1, 0xb9, 0x09, 0xbf, 0xc2, 0xf1, 0x19, 0xb4, 0x39, 0xef, 0xc4, 0x56, + 0x4c, 0x5f, 0x92, 0x9e, 0xd5, 0xb1, 0xbe, 0xb1, 0x3d, 0xbe, 0x69, 0xbb, 0x76, 0xd4, 0x42, 0xfa, + 0x86, 0xb8, 0x45, 0x5e, 0x69, 0x86, 0x8c, 0xe1, 0x69, 0xdb, 0xd0, 0xfc, 0x79, 0xf0, 0x25, 0xaf, + 0xb4, 0x89, 0xa0, 0x3f, 0x90, 0xd3, 0x80, 0xb8, 0xd9, 0x76, 0xb3, 0x60, 0xc4, 0xd8, 0x58, 0xdb, + 0x76, 0x9a, 0xac, 0x05, 0x58, 0xc1, 0x04, 0x7a, 0xd0, 0xd1, 0x09, 0x19, 0x94, 0x45, 0xb2, 0x10, + 0x2b, 0x59, 0xfc, 0x10, 0x8a, 0x79, 0x3e, 
0xe2, 0xde, 0xbc, 0x13, 0xb7, 0x8b, 0xe3, 0xdf, 0x0e, + 0xf1, 0x9a, 0x2f, 0xd1, 0xa1, 0xa5, 0x8d, 0xf9, 0x10, 0x58, 0x8f, 0x2c, 0x6b, 0xcc, 0xf1, 0xcc, + 0x19, 0x21, 0xa0, 0x3d, 0xb2, 0xb4, 0x31, 0x77, 0x6c, 0x25, 0x83, 0x0a, 0xf0, 0xc6, 0xbc, 0x6f, + 0x2b, 0x29, 0x9d, 0x36, 0xc4, 0xf1, 0x21, 0xe2, 0x46, 0x6a, 0x99, 0x47, 0x7b, 0xe6, 0xf8, 0x3e, + 0xe6, 0xd7, 0xc4, 0xc3, 0x16, 0x71, 0x7c, 0x07, 0xf1, 0x86, 0xf7, 0xb4, 0x06, 0xe7, 0x19, 0xf9, + 0x41, 0x70, 0x16, 0xdb, 0xec, 0x88, 0x74, 0xcd, 0x62, 0x4c, 0xfe, 0x20, 0x32, 0x6c, 0x9f, 0x53, + 0x4a, 0xdc, 0x4d, 0xb2, 0x16, 0xe6, 0xdd, 0xbc, 0xd8, 0xc4, 0xf4, 0x2d, 0x71, 0x61, 0x97, 0x0c, + 0xb5, 0x41, 0x34, 0x39, 0x74, 0xb7, 0x79, 0xd8, 0xcf, 0x1b, 0xad, 0x76, 0xb1, 0xd1, 0x8f, 0x4f, + 0xed, 0x94, 0x9b, 0x12, 0x1d, 0x11, 0x7c, 0x29, 0x76, 0xf5, 0xbd, 0x10, 0xd2, 0x17, 0x75, 0x13, + 0x66, 0xf6, 0x07, 0xd1, 0xa3, 0x5b, 0x67, 0x24, 0xb6, 0x9a, 0xf7, 0xce, 0x3b, 0x34, 0x93, 0x84, + 0x49, 0xb5, 0x6c, 0xcb, 0x9a, 0xf5, 0x9a, 0x3d, 0x68, 0x1c, 0x86, 0x4b, 0x75, 0x86, 0xbe, 0x7f, + 0x58, 0xe6, 0x7a, 0xb5, 0x4d, 0x83, 0x85, 0x5c, 0x87, 0xad, 0xbd, 0xbc, 0x3d, 0x5c, 0xca, 0xff, + 0x16, 0xf6, 0x2f, 0x42, 0x69, 0xcf, 0xac, 0xeb, 0xab, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xab, + 0xbc, 0xb8, 0x90, 0x65, 0x04, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/attr_value.proto b/executor/proto/tensorflow/core/framework/attr_value.proto new file mode 100644 index 0000000000..054e3ec97c --- /dev/null +++ b/executor/proto/tensorflow/core/framework/attr_value.proto @@ -0,0 +1,62 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "AttrValueProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import 
"tensorflow/core/framework/types.proto"; + +// Protocol buffer representing the value for an attr used to configure an Op. +// Comment indicates the corresponding attr type. Only the field matching the +// attr type may be filled. +message AttrValue { + // LINT.IfChange + message ListValue { + repeated bytes s = 2; // "list(string)" + repeated int64 i = 3 [packed = true]; // "list(int)" + repeated float f = 4 [packed = true]; // "list(float)" + repeated bool b = 5 [packed = true]; // "list(bool)" + repeated DataType type = 6 [packed = true]; // "list(type)" + repeated TensorShapeProto shape = 7; // "list(shape)" + repeated TensorProto tensor = 8; // "list(tensor)" + repeated NameAttrList func = 9; // "list(attr)" + } + // LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.cc) + + oneof value { + bytes s = 2; // "string" + int64 i = 3; // "int" + float f = 4; // "float" + bool b = 5; // "bool" + DataType type = 6; // "type" + TensorShapeProto shape = 7; // "shape" + TensorProto tensor = 8; // "tensor" + ListValue list = 1; // any "list(...)" + + // "func" represents a function. func.name is a function's name or + // a primitive op's name. func.attr.first is the name of an attr + // defined for that function. func.attr.second is the value for + // that attr in the instantiation. + NameAttrList func = 10; + + // This is a placeholder only used in nodes defined inside a + // function. It indicates the attr value will be supplied when + // the function is instantiated. For example, let us suppose a + // node "N" in function "FN". "N" has an attr "A" with value + // placeholder = "foo". When FN is instantiated with attr "foo" + // set to "bar", the instantiated node N's attr A will have been + // given the value "bar". + string placeholder = 9; + } +} + +// A list of attr names and their values. The whole list is attached +// with a string name. E.g., MatMul[T=float]. 
+message NameAttrList { + string name = 1; + map attr = 2; +} diff --git a/executor/proto/tensorflow/core/framework/cost_graph.pb.go b/executor/proto/tensorflow/core/framework/cost_graph.pb.go new file mode 100644 index 0000000000..2cc1a276c3 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/cost_graph.pb.go @@ -0,0 +1,409 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/cost_graph.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type CostGraphDef struct { + Node []*CostGraphDef_Node `protobuf:"bytes,1,rep,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CostGraphDef) Reset() { *m = CostGraphDef{} } +func (m *CostGraphDef) String() string { return proto.CompactTextString(m) } +func (*CostGraphDef) ProtoMessage() {} +func (*CostGraphDef) Descriptor() ([]byte, []int) { + return fileDescriptor_5f8948141565ace8, []int{0} +} + +func (m *CostGraphDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CostGraphDef.Unmarshal(m, b) +} +func (m *CostGraphDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CostGraphDef.Marshal(b, m, deterministic) +} +func (m *CostGraphDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_CostGraphDef.Merge(m, src) +} +func (m *CostGraphDef) XXX_Size() int { + return xxx_messageInfo_CostGraphDef.Size(m) +} 
+func (m *CostGraphDef) XXX_DiscardUnknown() { + xxx_messageInfo_CostGraphDef.DiscardUnknown(m) +} + +var xxx_messageInfo_CostGraphDef proto.InternalMessageInfo + +func (m *CostGraphDef) GetNode() []*CostGraphDef_Node { + if m != nil { + return m.Node + } + return nil +} + +type CostGraphDef_Node struct { + // The name of the node. Names are globally unique. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The device of the node. Can be empty if the node is mapped to the + // default partition or partitioning hasn't been run yet. + Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` + // The id of the node. Node ids are only unique inside a partition. + Id int32 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` + InputInfo []*CostGraphDef_Node_InputInfo `protobuf:"bytes,4,rep,name=input_info,json=inputInfo,proto3" json:"input_info,omitempty"` + OutputInfo []*CostGraphDef_Node_OutputInfo `protobuf:"bytes,5,rep,name=output_info,json=outputInfo,proto3" json:"output_info,omitempty"` + // Temporary memory used by this node. + TemporaryMemorySize int64 `protobuf:"varint,6,opt,name=temporary_memory_size,json=temporaryMemorySize,proto3" json:"temporary_memory_size,omitempty"` + // Persistent memory used by this node. + PersistentMemorySize int64 `protobuf:"varint,12,opt,name=persistent_memory_size,json=persistentMemorySize,proto3" json:"persistent_memory_size,omitempty"` + HostTempMemorySize int64 `protobuf:"varint,10,opt,name=host_temp_memory_size,json=hostTempMemorySize,proto3" json:"host_temp_memory_size,omitempty"` // Deprecated: Do not use. + DeviceTempMemorySize int64 `protobuf:"varint,11,opt,name=device_temp_memory_size,json=deviceTempMemorySize,proto3" json:"device_temp_memory_size,omitempty"` // Deprecated: Do not use. 
+ DevicePersistentMemorySize int64 `protobuf:"varint,16,opt,name=device_persistent_memory_size,json=devicePersistentMemorySize,proto3" json:"device_persistent_memory_size,omitempty"` // Deprecated: Do not use. + // Estimate of the computational cost of this node, in microseconds. + ComputeCost int64 `protobuf:"varint,9,opt,name=compute_cost,json=computeCost,proto3" json:"compute_cost,omitempty"` + // Analytical estimate of the computational cost of this node, in + // microseconds. + ComputeTime int64 `protobuf:"varint,14,opt,name=compute_time,json=computeTime,proto3" json:"compute_time,omitempty"` + // Analytical estimate of the memory access cost of this node, in + // microseconds. + MemoryTime int64 `protobuf:"varint,15,opt,name=memory_time,json=memoryTime,proto3" json:"memory_time,omitempty"` + // If true, the output is permanent: it can't be discarded, because this + // node is part of the "final output". Nodes may depend on final nodes. + IsFinal bool `protobuf:"varint,7,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"` + // Ids of the control inputs for this node. + ControlInput []int32 `protobuf:"varint,8,rep,packed,name=control_input,json=controlInput,proto3" json:"control_input,omitempty"` + // Are the costs inaccurate? 
+ Inaccurate bool `protobuf:"varint,17,opt,name=inaccurate,proto3" json:"inaccurate,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CostGraphDef_Node) Reset() { *m = CostGraphDef_Node{} } +func (m *CostGraphDef_Node) String() string { return proto.CompactTextString(m) } +func (*CostGraphDef_Node) ProtoMessage() {} +func (*CostGraphDef_Node) Descriptor() ([]byte, []int) { + return fileDescriptor_5f8948141565ace8, []int{0, 0} +} + +func (m *CostGraphDef_Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CostGraphDef_Node.Unmarshal(m, b) +} +func (m *CostGraphDef_Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CostGraphDef_Node.Marshal(b, m, deterministic) +} +func (m *CostGraphDef_Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_CostGraphDef_Node.Merge(m, src) +} +func (m *CostGraphDef_Node) XXX_Size() int { + return xxx_messageInfo_CostGraphDef_Node.Size(m) +} +func (m *CostGraphDef_Node) XXX_DiscardUnknown() { + xxx_messageInfo_CostGraphDef_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_CostGraphDef_Node proto.InternalMessageInfo + +func (m *CostGraphDef_Node) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CostGraphDef_Node) GetDevice() string { + if m != nil { + return m.Device + } + return "" +} + +func (m *CostGraphDef_Node) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *CostGraphDef_Node) GetInputInfo() []*CostGraphDef_Node_InputInfo { + if m != nil { + return m.InputInfo + } + return nil +} + +func (m *CostGraphDef_Node) GetOutputInfo() []*CostGraphDef_Node_OutputInfo { + if m != nil { + return m.OutputInfo + } + return nil +} + +func (m *CostGraphDef_Node) GetTemporaryMemorySize() int64 { + if m != nil { + return m.TemporaryMemorySize + } + return 0 +} + +func (m *CostGraphDef_Node) GetPersistentMemorySize() int64 { + if m != nil { + return 
m.PersistentMemorySize + } + return 0 +} + +// Deprecated: Do not use. +func (m *CostGraphDef_Node) GetHostTempMemorySize() int64 { + if m != nil { + return m.HostTempMemorySize + } + return 0 +} + +// Deprecated: Do not use. +func (m *CostGraphDef_Node) GetDeviceTempMemorySize() int64 { + if m != nil { + return m.DeviceTempMemorySize + } + return 0 +} + +// Deprecated: Do not use. +func (m *CostGraphDef_Node) GetDevicePersistentMemorySize() int64 { + if m != nil { + return m.DevicePersistentMemorySize + } + return 0 +} + +func (m *CostGraphDef_Node) GetComputeCost() int64 { + if m != nil { + return m.ComputeCost + } + return 0 +} + +func (m *CostGraphDef_Node) GetComputeTime() int64 { + if m != nil { + return m.ComputeTime + } + return 0 +} + +func (m *CostGraphDef_Node) GetMemoryTime() int64 { + if m != nil { + return m.MemoryTime + } + return 0 +} + +func (m *CostGraphDef_Node) GetIsFinal() bool { + if m != nil { + return m.IsFinal + } + return false +} + +func (m *CostGraphDef_Node) GetControlInput() []int32 { + if m != nil { + return m.ControlInput + } + return nil +} + +func (m *CostGraphDef_Node) GetInaccurate() bool { + if m != nil { + return m.Inaccurate + } + return false +} + +// Inputs of this node. They must be executed before this node can be +// executed. An input is a particular output of another node, specified +// by the node id and the output index. 
+type CostGraphDef_Node_InputInfo struct { + PrecedingNode int32 `protobuf:"varint,1,opt,name=preceding_node,json=precedingNode,proto3" json:"preceding_node,omitempty"` + PrecedingPort int32 `protobuf:"varint,2,opt,name=preceding_port,json=precedingPort,proto3" json:"preceding_port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CostGraphDef_Node_InputInfo) Reset() { *m = CostGraphDef_Node_InputInfo{} } +func (m *CostGraphDef_Node_InputInfo) String() string { return proto.CompactTextString(m) } +func (*CostGraphDef_Node_InputInfo) ProtoMessage() {} +func (*CostGraphDef_Node_InputInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_5f8948141565ace8, []int{0, 0, 0} +} + +func (m *CostGraphDef_Node_InputInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CostGraphDef_Node_InputInfo.Unmarshal(m, b) +} +func (m *CostGraphDef_Node_InputInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CostGraphDef_Node_InputInfo.Marshal(b, m, deterministic) +} +func (m *CostGraphDef_Node_InputInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_CostGraphDef_Node_InputInfo.Merge(m, src) +} +func (m *CostGraphDef_Node_InputInfo) XXX_Size() int { + return xxx_messageInfo_CostGraphDef_Node_InputInfo.Size(m) +} +func (m *CostGraphDef_Node_InputInfo) XXX_DiscardUnknown() { + xxx_messageInfo_CostGraphDef_Node_InputInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_CostGraphDef_Node_InputInfo proto.InternalMessageInfo + +func (m *CostGraphDef_Node_InputInfo) GetPrecedingNode() int32 { + if m != nil { + return m.PrecedingNode + } + return 0 +} + +func (m *CostGraphDef_Node_InputInfo) GetPrecedingPort() int32 { + if m != nil { + return m.PrecedingPort + } + return 0 +} + +// Outputs of this node. 
+type CostGraphDef_Node_OutputInfo struct { + Size int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` + // If >= 0, the output is an alias of an input. Note that an alias input + // may itself be an alias. The algorithm will therefore need to follow + // those pointers. + AliasInputPort int64 `protobuf:"varint,2,opt,name=alias_input_port,json=aliasInputPort,proto3" json:"alias_input_port,omitempty"` + Shape *TensorShapeProto `protobuf:"bytes,3,opt,name=shape,proto3" json:"shape,omitempty"` + Dtype DataType `protobuf:"varint,4,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CostGraphDef_Node_OutputInfo) Reset() { *m = CostGraphDef_Node_OutputInfo{} } +func (m *CostGraphDef_Node_OutputInfo) String() string { return proto.CompactTextString(m) } +func (*CostGraphDef_Node_OutputInfo) ProtoMessage() {} +func (*CostGraphDef_Node_OutputInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_5f8948141565ace8, []int{0, 0, 1} +} + +func (m *CostGraphDef_Node_OutputInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CostGraphDef_Node_OutputInfo.Unmarshal(m, b) +} +func (m *CostGraphDef_Node_OutputInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CostGraphDef_Node_OutputInfo.Marshal(b, m, deterministic) +} +func (m *CostGraphDef_Node_OutputInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_CostGraphDef_Node_OutputInfo.Merge(m, src) +} +func (m *CostGraphDef_Node_OutputInfo) XXX_Size() int { + return xxx_messageInfo_CostGraphDef_Node_OutputInfo.Size(m) +} +func (m *CostGraphDef_Node_OutputInfo) XXX_DiscardUnknown() { + xxx_messageInfo_CostGraphDef_Node_OutputInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_CostGraphDef_Node_OutputInfo proto.InternalMessageInfo + +func (m *CostGraphDef_Node_OutputInfo) GetSize() int64 { + if m != nil { + return m.Size 
+ } + return 0 +} + +func (m *CostGraphDef_Node_OutputInfo) GetAliasInputPort() int64 { + if m != nil { + return m.AliasInputPort + } + return 0 +} + +func (m *CostGraphDef_Node_OutputInfo) GetShape() *TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +func (m *CostGraphDef_Node_OutputInfo) GetDtype() DataType { + if m != nil { + return m.Dtype + } + return DataType_DT_INVALID +} + +func init() { + proto.RegisterType((*CostGraphDef)(nil), "tensorflow.CostGraphDef") + proto.RegisterType((*CostGraphDef_Node)(nil), "tensorflow.CostGraphDef.Node") + proto.RegisterType((*CostGraphDef_Node_InputInfo)(nil), "tensorflow.CostGraphDef.Node.InputInfo") + proto.RegisterType((*CostGraphDef_Node_OutputInfo)(nil), "tensorflow.CostGraphDef.Node.OutputInfo") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/cost_graph.proto", fileDescriptor_5f8948141565ace8) +} + +var fileDescriptor_5f8948141565ace8 = []byte{ + // 628 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xc7, 0xe5, 0xfc, 0x69, 0x93, 0x49, 0x9a, 0xf6, 0xb7, 0xbf, 0xb6, 0x2c, 0x11, 0x85, 0x00, + 0xaa, 0xb0, 0x2a, 0x94, 0x88, 0x00, 0x07, 0x0e, 0x5c, 0x4a, 0x29, 0xea, 0x01, 0x88, 0xdc, 0x5c, + 0xe0, 0x62, 0xb9, 0xf6, 0x26, 0x59, 0x11, 0x7b, 0xac, 0xdd, 0x0d, 0x55, 0xfa, 0x0a, 0xbc, 0x09, + 0x2f, 0xc2, 0x2b, 0x71, 0x44, 0x3b, 0x36, 0x8e, 0x53, 0xda, 0xde, 0xc6, 0x33, 0x9f, 0xef, 0x77, + 0xff, 0x78, 0x66, 0xe1, 0xc8, 0x88, 0x44, 0xa3, 0x9a, 0xcc, 0xf1, 0x72, 0x10, 0xa2, 0x12, 0x83, + 0x89, 0x0a, 0x62, 0x71, 0x89, 0xea, 0xdb, 0x20, 0x44, 0x6d, 0xfc, 0xa9, 0x0a, 0xd2, 0x59, 0x3f, + 0x55, 0x68, 0x90, 0xc1, 0x8a, 0xed, 0x3e, 0xbf, 0x5d, 0x97, 0x55, 0x7c, 0x3d, 0x0b, 0x52, 0x91, + 0x29, 0xbb, 0x87, 0x77, 0xd0, 0xcb, 0x54, 0xe8, 0x0c, 0x7b, 0xf2, 0xa3, 0x01, 0xed, 0x77, 0xa8, + 0xcd, 0x07, 0xbb, 0xe8, 0x89, 0x98, 0xb0, 0x17, 0x50, 0x4b, 0x30, 0x12, 0xdc, 0xe9, 0x55, 0xdd, + 0xd6, 0xf0, 0xa0, 
0xbf, 0xb2, 0xe9, 0x97, 0xb9, 0xfe, 0x27, 0x8c, 0x84, 0x47, 0x68, 0xf7, 0xd7, + 0x26, 0xd4, 0xec, 0x27, 0x63, 0x50, 0x4b, 0x82, 0xd8, 0x6a, 0x1d, 0xb7, 0xe9, 0x51, 0xcc, 0xf6, + 0x61, 0x23, 0x12, 0xdf, 0x65, 0x28, 0x78, 0x85, 0xb2, 0xf9, 0x17, 0xeb, 0x40, 0x45, 0x46, 0xbc, + 0xda, 0x73, 0xdc, 0xba, 0x57, 0x91, 0x11, 0x3b, 0x05, 0x90, 0x49, 0xba, 0x30, 0xbe, 0x4c, 0x26, + 0xc8, 0x6b, 0xb4, 0xfa, 0xb3, 0x3b, 0x57, 0xef, 0x9f, 0x59, 0xfe, 0x2c, 0x99, 0xa0, 0xd7, 0x94, + 0x7f, 0x43, 0x76, 0x06, 0x2d, 0x5c, 0x98, 0xc2, 0xa8, 0x4e, 0x46, 0xee, 0xdd, 0x46, 0x9f, 0x49, + 0x40, 0x4e, 0x80, 0x45, 0xcc, 0x86, 0xb0, 0x67, 0x44, 0x9c, 0xa2, 0x0a, 0xd4, 0xd2, 0x8f, 0x45, + 0x8c, 0x6a, 0xe9, 0x6b, 0x79, 0x25, 0xf8, 0x46, 0xcf, 0x71, 0xab, 0xde, 0xff, 0x45, 0xf1, 0x23, + 0xd5, 0xce, 0xe5, 0x95, 0x60, 0xaf, 0x60, 0x3f, 0x15, 0x4a, 0x4b, 0x6d, 0x44, 0x62, 0xd6, 0x44, + 0x6d, 0x12, 0xed, 0xae, 0xaa, 0x25, 0xd5, 0x6b, 0xd8, 0x9b, 0xd9, 0x5f, 0x6f, 0x1d, 0xd7, 0x44, + 0x60, 0x45, 0xc7, 0x15, 0xee, 0x78, 0xcc, 0x02, 0x63, 0x11, 0xa7, 0x25, 0xd9, 0x1b, 0xb8, 0x97, + 0xdd, 0xe6, 0xbf, 0xc2, 0x56, 0x21, 0xdc, 0xcd, 0x90, 0x6b, 0xd2, 0xf7, 0x70, 0x90, 0x4b, 0x6f, + 0xd9, 0xee, 0x4e, 0x61, 0xd0, 0xcd, 0xc0, 0xd1, 0x4d, 0x1b, 0x7f, 0x0c, 0xed, 0x10, 0xe3, 0x74, + 0x61, 0x84, 0x6f, 0x7b, 0x97, 0x37, 0xe9, 0x90, 0xad, 0x3c, 0x67, 0x6f, 0xba, 0x8c, 0x18, 0x19, + 0x0b, 0xde, 0x59, 0x43, 0xc6, 0x32, 0x16, 0xec, 0x11, 0xb4, 0xf2, 0xa5, 0x89, 0xd8, 0x26, 0x02, + 0xb2, 0x14, 0x01, 0xf7, 0xa1, 0x21, 0xb5, 0x3f, 0x91, 0x49, 0x30, 0xe7, 0x9b, 0x3d, 0xc7, 0x6d, + 0x78, 0x9b, 0x52, 0x9f, 0xda, 0x4f, 0xf6, 0x14, 0xb6, 0x42, 0x4c, 0x8c, 0xc2, 0xb9, 0x4f, 0x4d, + 0xc0, 0x1b, 0xbd, 0xaa, 0x5b, 0xf7, 0xda, 0x79, 0x92, 0x7a, 0x84, 0x3d, 0xb4, 0xcd, 0x15, 0x84, + 0xe1, 0x42, 0x05, 0x46, 0xf0, 0xff, 0xc8, 0xa1, 0x94, 0xe9, 0x7e, 0x81, 0x66, 0xd1, 0x4c, 0xec, + 0x10, 0x3a, 0xa9, 0x12, 0xa1, 0x88, 0x64, 0x32, 0xf5, 0xf3, 0x59, 0xb0, 0x5d, 0xba, 0x55, 0x64, + 0xa9, 0xd9, 0xd7, 0xb0, 0x14, 0x95, 0xa1, 0x06, 0x2f, 
0x63, 0x23, 0x54, 0xa6, 0xfb, 0xd3, 0x01, + 0x58, 0xf5, 0x97, 0x1d, 0x11, 0xba, 0x5e, 0x87, 0xce, 0x48, 0x31, 0x73, 0x61, 0x27, 0x98, 0xcb, + 0x40, 0x67, 0x07, 0x58, 0x79, 0x55, 0xbd, 0x0e, 0xe5, 0x69, 0x6b, 0xd6, 0x8c, 0x0d, 0xa1, 0x4e, + 0x33, 0x4e, 0x73, 0xd3, 0x1a, 0x3e, 0x28, 0xb7, 0xf5, 0x98, 0xc2, 0x73, 0x5b, 0x1e, 0xd9, 0xd1, + 0xf6, 0x32, 0x94, 0x1d, 0x41, 0x3d, 0xb2, 0x13, 0xcf, 0x6b, 0x3d, 0xc7, 0xed, 0x0c, 0x77, 0xcb, + 0x9a, 0x93, 0xc0, 0x04, 0xe3, 0x65, 0x2a, 0xbc, 0x0c, 0x39, 0x46, 0xe0, 0xa8, 0xa6, 0x65, 0xa2, + 0x78, 0x35, 0x8e, 0xb7, 0x8b, 0xb9, 0x21, 0x7b, 0x3d, 0x72, 0xbe, 0xbe, 0x9d, 0x4a, 0x33, 0x5b, + 0x5c, 0xf4, 0x43, 0x8c, 0x07, 0xa5, 0xe7, 0xe6, 0xe6, 0x70, 0x8a, 0xd7, 0xde, 0xa1, 0xdf, 0x8e, + 0x73, 0xb1, 0x41, 0xaf, 0xd0, 0xcb, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x80, 0x4b, 0x7c, 0xd5, + 0x14, 0x05, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/cost_graph.proto b/executor/proto/tensorflow/core/framework/cost_graph.proto new file mode 100644 index 0000000000..cc6bc84d69 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/cost_graph.proto @@ -0,0 +1,77 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "CostGraphProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +message CostGraphDef { + message Node { + // The name of the node. Names are globally unique. + string name = 1; + + // The device of the node. Can be empty if the node is mapped to the + // default partition or partitioning hasn't been run yet. + string device = 2; + + // The id of the node. Node ids are only unique inside a partition. + int32 id = 3; + + // Inputs of this node. They must be executed before this node can be + // executed. 
An input is a particular output of another node, specified + // by the node id and the output index. + message InputInfo { + int32 preceding_node = 1; + int32 preceding_port = 2; + } + repeated InputInfo input_info = 4; + + // Outputs of this node. + message OutputInfo { + int64 size = 1; + // If >= 0, the output is an alias of an input. Note that an alias input + // may itself be an alias. The algorithm will therefore need to follow + // those pointers. + int64 alias_input_port = 2; + TensorShapeProto shape = 3; + DataType dtype = 4; + } + repeated OutputInfo output_info = 5; + + // Temporary memory used by this node. + int64 temporary_memory_size = 6; + + // Persistent memory used by this node. + int64 persistent_memory_size = 12; + + int64 host_temp_memory_size = 10 [deprecated = true]; + int64 device_temp_memory_size = 11 [deprecated = true]; + int64 device_persistent_memory_size = 16 [deprecated = true]; + + // Estimate of the computational cost of this node, in microseconds. + int64 compute_cost = 9; + + // Analytical estimate of the computational cost of this node, in + // microseconds. + int64 compute_time = 14; + + // Analytical estimate of the memory access cost of this node, in + // microseconds. + int64 memory_time = 15; + + // If true, the output is permanent: it can't be discarded, because this + // node is part of the "final output". Nodes may depend on final nodes. + bool is_final = 7; + + // Ids of the control inputs for this node. + repeated int32 control_input = 8; + + // Are the costs inaccurate? + bool inaccurate = 17; + } + repeated Node node = 1; +} diff --git a/executor/proto/tensorflow/core/framework/device_attributes.pb.go b/executor/proto/tensorflow/core/framework/device_attributes.pb.go new file mode 100644 index 0000000000..2381ff6122 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/device_attributes.pb.go @@ -0,0 +1,302 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow/core/framework/device_attributes.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type InterconnectLink struct { + DeviceId int32 `protobuf:"varint,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Strength int32 `protobuf:"varint,3,opt,name=strength,proto3" json:"strength,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InterconnectLink) Reset() { *m = InterconnectLink{} } +func (m *InterconnectLink) String() string { return proto.CompactTextString(m) } +func (*InterconnectLink) ProtoMessage() {} +func (*InterconnectLink) Descriptor() ([]byte, []int) { + return fileDescriptor_74908851c78ce22e, []int{0} +} + +func (m *InterconnectLink) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InterconnectLink.Unmarshal(m, b) +} +func (m *InterconnectLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InterconnectLink.Marshal(b, m, deterministic) +} +func (m *InterconnectLink) XXX_Merge(src proto.Message) { + xxx_messageInfo_InterconnectLink.Merge(m, src) +} +func (m *InterconnectLink) XXX_Size() int { + return xxx_messageInfo_InterconnectLink.Size(m) +} +func (m *InterconnectLink) XXX_DiscardUnknown() { + xxx_messageInfo_InterconnectLink.DiscardUnknown(m) +} + +var xxx_messageInfo_InterconnectLink 
proto.InternalMessageInfo + +func (m *InterconnectLink) GetDeviceId() int32 { + if m != nil { + return m.DeviceId + } + return 0 +} + +func (m *InterconnectLink) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *InterconnectLink) GetStrength() int32 { + if m != nil { + return m.Strength + } + return 0 +} + +type LocalLinks struct { + Link []*InterconnectLink `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LocalLinks) Reset() { *m = LocalLinks{} } +func (m *LocalLinks) String() string { return proto.CompactTextString(m) } +func (*LocalLinks) ProtoMessage() {} +func (*LocalLinks) Descriptor() ([]byte, []int) { + return fileDescriptor_74908851c78ce22e, []int{1} +} + +func (m *LocalLinks) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LocalLinks.Unmarshal(m, b) +} +func (m *LocalLinks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LocalLinks.Marshal(b, m, deterministic) +} +func (m *LocalLinks) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalLinks.Merge(m, src) +} +func (m *LocalLinks) XXX_Size() int { + return xxx_messageInfo_LocalLinks.Size(m) +} +func (m *LocalLinks) XXX_DiscardUnknown() { + xxx_messageInfo_LocalLinks.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalLinks proto.InternalMessageInfo + +func (m *LocalLinks) GetLink() []*InterconnectLink { + if m != nil { + return m.Link + } + return nil +} + +type DeviceLocality struct { + // Optional bus locality of device. Default value of 0 means + // no specific locality. Specific localities are indexed from 1. + BusId int32 `protobuf:"varint,1,opt,name=bus_id,json=busId,proto3" json:"bus_id,omitempty"` + // Optional NUMA locality of device. 
+ NumaNode int32 `protobuf:"varint,2,opt,name=numa_node,json=numaNode,proto3" json:"numa_node,omitempty"` + // Optional local interconnect links to other devices. + Links *LocalLinks `protobuf:"bytes,3,opt,name=links,proto3" json:"links,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceLocality) Reset() { *m = DeviceLocality{} } +func (m *DeviceLocality) String() string { return proto.CompactTextString(m) } +func (*DeviceLocality) ProtoMessage() {} +func (*DeviceLocality) Descriptor() ([]byte, []int) { + return fileDescriptor_74908851c78ce22e, []int{2} +} + +func (m *DeviceLocality) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceLocality.Unmarshal(m, b) +} +func (m *DeviceLocality) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceLocality.Marshal(b, m, deterministic) +} +func (m *DeviceLocality) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceLocality.Merge(m, src) +} +func (m *DeviceLocality) XXX_Size() int { + return xxx_messageInfo_DeviceLocality.Size(m) +} +func (m *DeviceLocality) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceLocality.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceLocality proto.InternalMessageInfo + +func (m *DeviceLocality) GetBusId() int32 { + if m != nil { + return m.BusId + } + return 0 +} + +func (m *DeviceLocality) GetNumaNode() int32 { + if m != nil { + return m.NumaNode + } + return 0 +} + +func (m *DeviceLocality) GetLinks() *LocalLinks { + if m != nil { + return m.Links + } + return nil +} + +type DeviceAttributes struct { + // Fully specified name of the device within a cluster. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // String representation of device_type. + DeviceType string `protobuf:"bytes,2,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"` + // Memory capacity of device in bytes. 
+ MemoryLimit int64 `protobuf:"varint,4,opt,name=memory_limit,json=memoryLimit,proto3" json:"memory_limit,omitempty"` + // Platform-specific data about device that may be useful + // for supporting efficient data transfers. + Locality *DeviceLocality `protobuf:"bytes,5,opt,name=locality,proto3" json:"locality,omitempty"` + // A device is assigned a global unique number each time it is + // initialized. "incarnation" should never be 0. + Incarnation uint64 `protobuf:"fixed64,6,opt,name=incarnation,proto3" json:"incarnation,omitempty"` + // String representation of the physical device that this device maps to. + PhysicalDeviceDesc string `protobuf:"bytes,7,opt,name=physical_device_desc,json=physicalDeviceDesc,proto3" json:"physical_device_desc,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceAttributes) Reset() { *m = DeviceAttributes{} } +func (m *DeviceAttributes) String() string { return proto.CompactTextString(m) } +func (*DeviceAttributes) ProtoMessage() {} +func (*DeviceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_74908851c78ce22e, []int{3} +} + +func (m *DeviceAttributes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceAttributes.Unmarshal(m, b) +} +func (m *DeviceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceAttributes.Marshal(b, m, deterministic) +} +func (m *DeviceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAttributes.Merge(m, src) +} +func (m *DeviceAttributes) XXX_Size() int { + return xxx_messageInfo_DeviceAttributes.Size(m) +} +func (m *DeviceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAttributes proto.InternalMessageInfo + +func (m *DeviceAttributes) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeviceAttributes) GetDeviceType() 
string { + if m != nil { + return m.DeviceType + } + return "" +} + +func (m *DeviceAttributes) GetMemoryLimit() int64 { + if m != nil { + return m.MemoryLimit + } + return 0 +} + +func (m *DeviceAttributes) GetLocality() *DeviceLocality { + if m != nil { + return m.Locality + } + return nil +} + +func (m *DeviceAttributes) GetIncarnation() uint64 { + if m != nil { + return m.Incarnation + } + return 0 +} + +func (m *DeviceAttributes) GetPhysicalDeviceDesc() string { + if m != nil { + return m.PhysicalDeviceDesc + } + return "" +} + +func init() { + proto.RegisterType((*InterconnectLink)(nil), "tensorflow.InterconnectLink") + proto.RegisterType((*LocalLinks)(nil), "tensorflow.LocalLinks") + proto.RegisterType((*DeviceLocality)(nil), "tensorflow.DeviceLocality") + proto.RegisterType((*DeviceAttributes)(nil), "tensorflow.DeviceAttributes") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/device_attributes.proto", fileDescriptor_74908851c78ce22e) +} + +var fileDescriptor_74908851c78ce22e = []byte{ + // 416 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xc1, 0x6b, 0xdb, 0x30, + 0x14, 0xc6, 0xd1, 0x12, 0x67, 0xc9, 0xf3, 0x18, 0x45, 0x6c, 0x45, 0x74, 0x83, 0x79, 0x39, 0xf9, + 0x30, 0x92, 0xae, 0x83, 0xdd, 0x36, 0x58, 0xe9, 0x25, 0x10, 0x46, 0x11, 0x3b, 0xed, 0x62, 0x64, + 0x59, 0x4d, 0x44, 0x6c, 0x29, 0x48, 0x72, 0x4b, 0xfe, 0xf1, 0xb1, 0xe3, 0x90, 0xe4, 0xc4, 0x5e, + 0xe8, 0xed, 0xf9, 0xf3, 0xd3, 0xfb, 0x7e, 0xef, 0xf1, 0xc1, 0x67, 0x27, 0x94, 0xd5, 0xe6, 0xa1, + 0xd6, 0x4f, 0x4b, 0xae, 0x8d, 0x58, 0x3e, 0x18, 0xd6, 0x88, 0x27, 0x6d, 0x76, 0xcb, 0x4a, 0x3c, + 0x4a, 0x2e, 0x0a, 0xe6, 0x9c, 0x91, 0x65, 0xeb, 0x84, 0x5d, 0xec, 0x8d, 0x76, 0x1a, 0x43, 0xff, + 0x64, 0x5e, 0xc0, 0xc5, 0x4a, 0x39, 0x61, 0xb8, 0x56, 0x4a, 0x70, 0xb7, 0x96, 0x6a, 0x87, 0xdf, + 0xc1, 0xac, 0x7b, 0x2a, 0x2b, 0x82, 0x32, 0x94, 0x27, 0x74, 0x1a, 0x85, 0x55, 0x85, 0x31, 0x8c, + 0xdd, 0x61, 0x2f, 0xc8, 0x8b, 0x0c, 
0xe5, 0x33, 0x1a, 0x6a, 0x7c, 0x05, 0x53, 0xeb, 0x8c, 0x50, + 0x1b, 0xb7, 0x25, 0xa3, 0xd8, 0x7f, 0xfc, 0x9e, 0x7f, 0x07, 0x58, 0x6b, 0xce, 0x6a, 0x3f, 0xd9, + 0xe2, 0x6b, 0x18, 0xd7, 0x52, 0xed, 0x08, 0xca, 0x46, 0x79, 0x7a, 0xf3, 0x7e, 0xd1, 0x93, 0x2c, + 0xce, 0x31, 0x68, 0xe8, 0x9c, 0x1b, 0x78, 0x7d, 0x17, 0xbc, 0xc3, 0x14, 0xe9, 0x0e, 0xf8, 0x2d, + 0x4c, 0xca, 0xd6, 0xf6, 0x6c, 0x49, 0xd9, 0xda, 0x55, 0xe5, 0xa9, 0x55, 0xdb, 0xb0, 0x42, 0xe9, + 0x2a, 0xd2, 0x25, 0x74, 0xea, 0x85, 0x9f, 0xba, 0x12, 0xf8, 0x13, 0x24, 0x7e, 0x9a, 0x0d, 0x78, + 0xe9, 0xcd, 0xe5, 0xd0, 0xb8, 0xc7, 0xa3, 0xb1, 0x69, 0xfe, 0x07, 0xc1, 0x45, 0x34, 0xfd, 0x71, + 0xba, 0x9d, 0x5f, 0x5c, 0xb1, 0x46, 0x04, 0xd3, 0x19, 0x0d, 0x35, 0xfe, 0x00, 0x69, 0x77, 0xa9, + 0xc1, 0x4d, 0x20, 0x4a, 0xbf, 0xfc, 0x65, 0x3e, 0xc2, 0xab, 0x46, 0x34, 0xda, 0x1c, 0x8a, 0x5a, + 0x36, 0xd2, 0x91, 0x71, 0x86, 0xf2, 0x11, 0x4d, 0xa3, 0xb6, 0xf6, 0x12, 0xfe, 0x0a, 0xd3, 0xba, + 0x5b, 0x8d, 0x24, 0x81, 0xee, 0x6a, 0x48, 0xf7, 0xff, 0xf2, 0xf4, 0xd4, 0x8b, 0x33, 0x48, 0xa5, + 0xe2, 0xcc, 0x28, 0xe6, 0xa4, 0x56, 0x64, 0x92, 0xa1, 0x7c, 0x42, 0x87, 0x12, 0xbe, 0x86, 0x37, + 0xfb, 0xed, 0xc1, 0x4a, 0xce, 0xea, 0xa2, 0xc3, 0xac, 0x84, 0xe5, 0xe4, 0x65, 0xc0, 0xc4, 0xc7, + 0x7f, 0xd1, 0xe1, 0x4e, 0x58, 0x7e, 0xfb, 0x08, 0x44, 0x9b, 0xcd, 0xd0, 0xfe, 0x94, 0xa6, 0xdb, + 0xcb, 0xf3, 0x8b, 0xdc, 0xfb, 0x30, 0xd9, 0x7b, 0xf4, 0xfb, 0xdb, 0x46, 0xba, 0x6d, 0x5b, 0x2e, + 0xb8, 0x6e, 0x96, 0x83, 0x34, 0x3e, 0x5f, 0x6e, 0xf4, 0x59, 0x4c, 0xff, 0x22, 0x54, 0x4e, 0x42, + 0x30, 0xbf, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x5f, 0x72, 0x19, 0xcd, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/device_attributes.proto b/executor/proto/tensorflow/core/framework/device_attributes.proto new file mode 100644 index 0000000000..44236ca979 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/device_attributes.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = 
true; +option java_outer_classname = "DeviceAttributesProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; + +message InterconnectLink { + int32 device_id = 1; + string type = 2; + int32 strength = 3; +}; + +message LocalLinks { + repeated InterconnectLink link = 1; +}; + +message DeviceLocality { + // Optional bus locality of device. Default value of 0 means + // no specific locality. Specific localities are indexed from 1. + int32 bus_id = 1; + + // Optional NUMA locality of device. + int32 numa_node = 2; + + // Optional local interconnect links to other devices. + LocalLinks links = 3; +}; + +message DeviceAttributes { + // Fully specified name of the device within a cluster. + string name = 1; + + // String representation of device_type. + string device_type = 2; + + // Memory capacity of device in bytes. + int64 memory_limit = 4; + + // Platform-specific data about device that may be useful + // for supporting efficient data transfers. + DeviceLocality locality = 5; + + // A device is assigned a global unique number each time it is + // initialized. "incarnation" should never be 0. + fixed64 incarnation = 6; + + // String representation of the physical device that this device maps to. + string physical_device_desc = 7; +} diff --git a/executor/proto/tensorflow/core/framework/function.pb.go b/executor/proto/tensorflow/core/framework/function.pb.go new file mode 100644 index 0000000000..a5f6a1fa39 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/function.pb.go @@ -0,0 +1,323 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/function.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A library is a set of named functions. +type FunctionDefLibrary struct { + Function []*FunctionDef `protobuf:"bytes,1,rep,name=function,proto3" json:"function,omitempty"` + Gradient []*GradientDef `protobuf:"bytes,2,rep,name=gradient,proto3" json:"gradient,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FunctionDefLibrary) Reset() { *m = FunctionDefLibrary{} } +func (m *FunctionDefLibrary) String() string { return proto.CompactTextString(m) } +func (*FunctionDefLibrary) ProtoMessage() {} +func (*FunctionDefLibrary) Descriptor() ([]byte, []int) { + return fileDescriptor_507748d6812c5f14, []int{0} +} + +func (m *FunctionDefLibrary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FunctionDefLibrary.Unmarshal(m, b) +} +func (m *FunctionDefLibrary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FunctionDefLibrary.Marshal(b, m, deterministic) +} +func (m *FunctionDefLibrary) XXX_Merge(src proto.Message) { + xxx_messageInfo_FunctionDefLibrary.Merge(m, src) +} +func (m *FunctionDefLibrary) XXX_Size() int { + return xxx_messageInfo_FunctionDefLibrary.Size(m) +} +func (m *FunctionDefLibrary) XXX_DiscardUnknown() { + xxx_messageInfo_FunctionDefLibrary.DiscardUnknown(m) +} + +var xxx_messageInfo_FunctionDefLibrary proto.InternalMessageInfo + +func (m *FunctionDefLibrary) GetFunction() []*FunctionDef { + if m != nil { + return m.Function + } + return nil +} + +func (m *FunctionDefLibrary) GetGradient() []*GradientDef { + if m != nil { + return m.Gradient 
+ } + return nil +} + +// A function can be instantiated when the runtime can bind every attr +// with a value. When a GraphDef has a call to a function, it must +// have binding for every attr defined in the signature. +// +// TODO(zhifengc): +// * device spec, etc. +type FunctionDef struct { + // The definition of the function's name, arguments, return values, + // attrs etc. + Signature *OpDef `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + // Attributes specific to this function definition. + Attr map[string]*AttrValue `protobuf:"bytes,5,rep,name=attr,proto3" json:"attr,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ArgAttr map[uint32]*FunctionDef_ArgAttrs `protobuf:"bytes,7,rep,name=arg_attr,json=argAttr,proto3" json:"arg_attr,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // By convention, "op" in node_def is resolved by consulting with a + // user-defined library first. If not resolved, "func" is assumed to + // be a builtin op. + NodeDef []*NodeDef `protobuf:"bytes,3,rep,name=node_def,json=nodeDef,proto3" json:"node_def,omitempty"` + // A mapping from the output arg names from `signature` to the + // outputs from `node_def` that should be returned by the function. + Ret map[string]string `protobuf:"bytes,4,rep,name=ret,proto3" json:"ret,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A mapping from control output names from `signature` to node names in + // `node_def` which should be control outputs of this function. 
+ ControlRet map[string]string `protobuf:"bytes,6,rep,name=control_ret,json=controlRet,proto3" json:"control_ret,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FunctionDef) Reset() { *m = FunctionDef{} } +func (m *FunctionDef) String() string { return proto.CompactTextString(m) } +func (*FunctionDef) ProtoMessage() {} +func (*FunctionDef) Descriptor() ([]byte, []int) { + return fileDescriptor_507748d6812c5f14, []int{1} +} + +func (m *FunctionDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FunctionDef.Unmarshal(m, b) +} +func (m *FunctionDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FunctionDef.Marshal(b, m, deterministic) +} +func (m *FunctionDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_FunctionDef.Merge(m, src) +} +func (m *FunctionDef) XXX_Size() int { + return xxx_messageInfo_FunctionDef.Size(m) +} +func (m *FunctionDef) XXX_DiscardUnknown() { + xxx_messageInfo_FunctionDef.DiscardUnknown(m) +} + +var xxx_messageInfo_FunctionDef proto.InternalMessageInfo + +func (m *FunctionDef) GetSignature() *OpDef { + if m != nil { + return m.Signature + } + return nil +} + +func (m *FunctionDef) GetAttr() map[string]*AttrValue { + if m != nil { + return m.Attr + } + return nil +} + +func (m *FunctionDef) GetArgAttr() map[uint32]*FunctionDef_ArgAttrs { + if m != nil { + return m.ArgAttr + } + return nil +} + +func (m *FunctionDef) GetNodeDef() []*NodeDef { + if m != nil { + return m.NodeDef + } + return nil +} + +func (m *FunctionDef) GetRet() map[string]string { + if m != nil { + return m.Ret + } + return nil +} + +func (m *FunctionDef) GetControlRet() map[string]string { + if m != nil { + return m.ControlRet + } + return nil +} + +// Attributes for function arguments. 
These attributes are the same set of +// valid attributes as to _Arg nodes. +type FunctionDef_ArgAttrs struct { + Attr map[string]*AttrValue `protobuf:"bytes,1,rep,name=attr,proto3" json:"attr,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FunctionDef_ArgAttrs) Reset() { *m = FunctionDef_ArgAttrs{} } +func (m *FunctionDef_ArgAttrs) String() string { return proto.CompactTextString(m) } +func (*FunctionDef_ArgAttrs) ProtoMessage() {} +func (*FunctionDef_ArgAttrs) Descriptor() ([]byte, []int) { + return fileDescriptor_507748d6812c5f14, []int{1, 1} +} + +func (m *FunctionDef_ArgAttrs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FunctionDef_ArgAttrs.Unmarshal(m, b) +} +func (m *FunctionDef_ArgAttrs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FunctionDef_ArgAttrs.Marshal(b, m, deterministic) +} +func (m *FunctionDef_ArgAttrs) XXX_Merge(src proto.Message) { + xxx_messageInfo_FunctionDef_ArgAttrs.Merge(m, src) +} +func (m *FunctionDef_ArgAttrs) XXX_Size() int { + return xxx_messageInfo_FunctionDef_ArgAttrs.Size(m) +} +func (m *FunctionDef_ArgAttrs) XXX_DiscardUnknown() { + xxx_messageInfo_FunctionDef_ArgAttrs.DiscardUnknown(m) +} + +var xxx_messageInfo_FunctionDef_ArgAttrs proto.InternalMessageInfo + +func (m *FunctionDef_ArgAttrs) GetAttr() map[string]*AttrValue { + if m != nil { + return m.Attr + } + return nil +} + +// GradientDef defines the gradient function of a function defined in +// a function library. +// +// A gradient function g (specified by gradient_func) for a function f +// (specified by function_name) must follow the following: +// +// The function 'f' must be a numerical function which takes N inputs +// and produces M outputs. Its gradient function 'g', which is a +// function taking N + M inputs and produces N outputs. 
+// +// I.e. if we have +// (y1, y2, ..., y_M) = f(x1, x2, ..., x_N), +// then, g is +// (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N, +// dL/dy1, dL/dy2, ..., dL/dy_M), +// where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the +// loss function). dL/dx_i is the partial derivative of L with respect +// to x_i. +type GradientDef struct { + FunctionName string `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` + GradientFunc string `protobuf:"bytes,2,opt,name=gradient_func,json=gradientFunc,proto3" json:"gradient_func,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GradientDef) Reset() { *m = GradientDef{} } +func (m *GradientDef) String() string { return proto.CompactTextString(m) } +func (*GradientDef) ProtoMessage() {} +func (*GradientDef) Descriptor() ([]byte, []int) { + return fileDescriptor_507748d6812c5f14, []int{2} +} + +func (m *GradientDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GradientDef.Unmarshal(m, b) +} +func (m *GradientDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GradientDef.Marshal(b, m, deterministic) +} +func (m *GradientDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_GradientDef.Merge(m, src) +} +func (m *GradientDef) XXX_Size() int { + return xxx_messageInfo_GradientDef.Size(m) +} +func (m *GradientDef) XXX_DiscardUnknown() { + xxx_messageInfo_GradientDef.DiscardUnknown(m) +} + +var xxx_messageInfo_GradientDef proto.InternalMessageInfo + +func (m *GradientDef) GetFunctionName() string { + if m != nil { + return m.FunctionName + } + return "" +} + +func (m *GradientDef) GetGradientFunc() string { + if m != nil { + return m.GradientFunc + } + return "" +} + +func init() { + proto.RegisterType((*FunctionDefLibrary)(nil), "tensorflow.FunctionDefLibrary") + proto.RegisterType((*FunctionDef)(nil), "tensorflow.FunctionDef") + 
proto.RegisterMapType((map[uint32]*FunctionDef_ArgAttrs)(nil), "tensorflow.FunctionDef.ArgAttrEntry") + proto.RegisterMapType((map[string]*AttrValue)(nil), "tensorflow.FunctionDef.AttrEntry") + proto.RegisterMapType((map[string]string)(nil), "tensorflow.FunctionDef.ControlRetEntry") + proto.RegisterMapType((map[string]string)(nil), "tensorflow.FunctionDef.RetEntry") + proto.RegisterType((*FunctionDef_ArgAttrs)(nil), "tensorflow.FunctionDef.ArgAttrs") + proto.RegisterMapType((map[string]*AttrValue)(nil), "tensorflow.FunctionDef.ArgAttrs.AttrEntry") + proto.RegisterType((*GradientDef)(nil), "tensorflow.GradientDef") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/function.proto", fileDescriptor_507748d6812c5f14) +} + +var fileDescriptor_507748d6812c5f14 = []byte{ + // 517 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xdf, 0x8a, 0xd3, 0x40, + 0x14, 0xc6, 0x49, 0xd3, 0x6d, 0xd3, 0x93, 0xae, 0xae, 0xa3, 0x62, 0xe8, 0x55, 0xad, 0xa2, 0x65, + 0x85, 0x04, 0xba, 0xb8, 0x88, 0xb0, 0x8a, 0xeb, 0x5f, 0x44, 0xea, 0x92, 0x0b, 0x05, 0x11, 0xc2, + 0x34, 0x9d, 0xc4, 0xb0, 0xed, 0x4c, 0x99, 0x4c, 0x5d, 0x7a, 0xe3, 0x83, 0xf8, 0x0c, 0x3e, 0xa0, + 0x97, 0x32, 0x93, 0x4c, 0x32, 0xd6, 0x8d, 0x45, 0xf0, 0x6e, 0x3a, 0xf3, 0xfd, 0xbe, 0x73, 0xfa, + 0x4d, 0xe6, 0xc0, 0x58, 0x10, 0x9a, 0x33, 0x9e, 0x2c, 0xd8, 0x45, 0x10, 0x33, 0x4e, 0x82, 0x84, + 0xe3, 0x25, 0xb9, 0x60, 0xfc, 0x3c, 0x48, 0xd6, 0x34, 0x16, 0x19, 0xa3, 0xfe, 0x8a, 0x33, 0xc1, + 0x10, 0xd4, 0xca, 0xc1, 0x61, 0x33, 0x85, 0x85, 0xe0, 0xd1, 0x57, 0xbc, 0x58, 0x93, 0x82, 0x1b, + 0xfc, 0xa5, 0x02, 0x65, 0x73, 0x12, 0xcd, 0x49, 0x52, 0x2a, 0xef, 0x35, 0x2b, 0xd9, 0xaa, 0xd6, + 0x8d, 0xbe, 0x01, 0x7a, 0x55, 0xf6, 0xf6, 0x82, 0x24, 0xef, 0xb2, 0x19, 0xc7, 0x7c, 0x83, 0x8e, + 0xc0, 0xd1, 0x1d, 0x7b, 0xd6, 0xd0, 0x1e, 0xbb, 0x93, 0x5b, 0x7e, 0x6d, 0xe8, 0x1b, 0x44, 0x58, + 0x09, 0x25, 0x94, 0x72, 0x3c, 0xcf, 0x08, 0x15, 0x5e, 0xeb, 0x4f, 0xe8, 0x75, 
0x79, 0xa6, 0x20, + 0x2d, 0x1c, 0xfd, 0xe8, 0x80, 0x6b, 0xd8, 0xa1, 0x00, 0x7a, 0x79, 0x96, 0x52, 0x2c, 0xd6, 0x9c, + 0x78, 0xd6, 0xd0, 0x1a, 0xbb, 0x93, 0x6b, 0xa6, 0xcb, 0xfb, 0x95, 0xe4, 0x6b, 0x0d, 0x7a, 0x08, + 0x6d, 0x19, 0x93, 0xb7, 0xa7, 0x2a, 0xde, 0x6e, 0x68, 0xd3, 0x7f, 0x26, 0x04, 0x7f, 0x49, 0x05, + 0xdf, 0x84, 0x4a, 0x8e, 0x9e, 0x82, 0x83, 0x79, 0x1a, 0x29, 0xb4, 0xab, 0xd0, 0xbb, 0x8d, 0x28, + 0x4f, 0x6b, 0xba, 0x8b, 0x8b, 0x5f, 0xc8, 0x07, 0x47, 0x47, 0xee, 0xd9, 0xca, 0xe0, 0xba, 0x69, + 0x30, 0x65, 0x73, 0x22, 0x3b, 0xed, 0xd2, 0x62, 0x81, 0x26, 0x60, 0x73, 0x22, 0xbc, 0xb6, 0x92, + 0x0e, 0x9b, 0x6a, 0x85, 0x44, 0x14, 0x75, 0xa4, 0x18, 0xbd, 0x01, 0x37, 0x66, 0x54, 0x70, 0xb6, + 0x88, 0x24, 0xdb, 0x51, 0xec, 0xfd, 0x26, 0xf6, 0x79, 0x21, 0xad, 0x2c, 0x20, 0xae, 0x36, 0x06, + 0x53, 0xe8, 0x55, 0xff, 0x01, 0x1d, 0x80, 0x7d, 0x4e, 0x36, 0x2a, 0xdd, 0x5e, 0x28, 0x97, 0xe8, + 0x01, 0xec, 0xa9, 0xcf, 0xcc, 0x6b, 0xa9, 0xc4, 0x6f, 0x9a, 0x25, 0x24, 0xf7, 0x41, 0x1e, 0x86, + 0x85, 0xe6, 0x71, 0xeb, 0x91, 0x35, 0xf8, 0x6e, 0x81, 0x53, 0xe6, 0x92, 0xa3, 0x27, 0xe5, 0x15, + 0x14, 0x5f, 0xca, 0xe1, 0x8e, 0x1c, 0xf3, 0xed, 0xbb, 0xf8, 0xef, 0xcd, 0x7d, 0x86, 0xbe, 0x79, + 0x67, 0xa6, 0xe5, 0x7e, 0x61, 0x79, 0xfc, 0xbb, 0xe5, 0x70, 0x57, 0xcb, 0xa6, 0xfb, 0x31, 0x38, + 0x3a, 0xe2, 0x4b, 0x9a, 0xbd, 0x61, 0x3a, 0xf7, 0x4c, 0xee, 0x04, 0xae, 0x6e, 0xdd, 0xd0, 0xbf, + 0xe0, 0x6f, 0xdb, 0x4e, 0xeb, 0xc0, 0x1e, 0x7d, 0x04, 0xd7, 0x78, 0x47, 0xe8, 0x0e, 0xec, 0xeb, + 0xe7, 0x17, 0x51, 0xbc, 0x24, 0xa5, 0x55, 0x5f, 0x6f, 0x4e, 0xf1, 0x92, 0x48, 0x91, 0x7e, 0x6e, + 0x91, 0x3c, 0x28, 0xbd, 0xfb, 0x7a, 0x53, 0xfe, 0xe1, 0x53, 0x0a, 0x1e, 0xe3, 0xa9, 0x99, 0x43, + 0x35, 0x30, 0x4e, 0xaf, 0xe8, 0x48, 0xce, 0xe4, 0xc8, 0xc8, 0xcf, 0xac, 0x4f, 0x27, 0x69, 0x26, + 0xbe, 0xac, 0x67, 0x7e, 0xcc, 0x96, 0x81, 0x31, 0x68, 0x2e, 0x5f, 0xa6, 0x6c, 0x6b, 0x02, 0xfd, + 0xb4, 0xac, 0x59, 0x47, 0x8d, 0x9f, 0xa3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xab, 0xce, 0xca, + 0x73, 0x34, 0x05, 
0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/function.proto b/executor/proto/tensorflow/core/framework/function.proto new file mode 100644 index 0000000000..7b5756ed8c --- /dev/null +++ b/executor/proto/tensorflow/core/framework/function.proto @@ -0,0 +1,113 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "FunctionProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/attr_value.proto"; +import "tensorflow/core/framework/node_def.proto"; +import "tensorflow/core/framework/op_def.proto"; + +// A library is a set of named functions. +message FunctionDefLibrary { + repeated FunctionDef function = 1; + repeated GradientDef gradient = 2; +} + +// A function can be instantiated when the runtime can bind every attr +// with a value. When a GraphDef has a call to a function, it must +// have binding for every attr defined in the signature. +// +// TODO(zhifengc): +// * device spec, etc. +message FunctionDef { + // The definition of the function's name, arguments, return values, + // attrs etc. + OpDef signature = 1; + + // Attributes specific to this function definition. + map attr = 5; + + // Attributes for function arguments. These attributes are the same set of + // valid attributes as to _Arg nodes. + message ArgAttrs { + map attr = 1; + } + map arg_attr = 7; + + // NOTE: field id 2 deleted on Jan 11, 2017, GraphDef version 21. + reserved 2; + + // In both of the following fields, there is the need to specify an + // output that is used as either the input to another node (in + // `node_def`) or as a return value of the function (in `ret`). + // Unlike the NodeDefs in GraphDef, we need to be able to specify a + // list in some cases (instead of just single outputs). 
Also, we + // need to be able to deal with lists of unknown length (so the + // output index may not be known at function definition time). So + // we use the following format instead: + // * "fun_in" where "fun_in" is the name of a function input arg in + // the `signature` field above. This represents that input, whether + // it is a single tensor or a list. + // * "fun_in:0" gives the first element of a function input arg (a + // non-list input is considered a list of length 1 for these + // purposes). + // * "node:out" where "node" is the name of a node in `node_def` and + // "out" is the name one of its op's output arguments (the name + // comes from the OpDef of the node's op). This represents that + // node's output, whether it is a single tensor or a list. + // Note: We enforce that an op's output arguments are never + // renamed in the backwards-compatibility test. + // * "node:out:0" gives the first element of a node output arg (a + // non-list output is considered a list of length 1 for these + // purposes). + // + // NOT CURRENTLY SUPPORTED (but may be in the future): + // * "node:out:-1" gives last element in a node output list + // * "node:out:1:" gives a list with all but the first element in a + // node output list + // * "node:out::-1" gives a list with all but the last element in a + // node output list + + // The body of the function. Unlike the NodeDefs in a GraphDef, attrs + // may have values of type `placeholder` and the `input` field uses + // the "output" format above. + + // By convention, "op" in node_def is resolved by consulting with a + // user-defined library first. If not resolved, "func" is assumed to + // be a builtin op. + repeated NodeDef node_def = 3; + + // A mapping from the output arg names from `signature` to the + // outputs from `node_def` that should be returned by the function. 
+ map ret = 4; + + // A mapping from control output names from `signature` to node names in + // `node_def` which should be control outputs of this function. + map control_ret = 6; +} + +// GradientDef defines the gradient function of a function defined in +// a function library. +// +// A gradient function g (specified by gradient_func) for a function f +// (specified by function_name) must follow the following: +// +// The function 'f' must be a numerical function which takes N inputs +// and produces M outputs. Its gradient function 'g', which is a +// function taking N + M inputs and produces N outputs. +// +// I.e. if we have +// (y1, y2, ..., y_M) = f(x1, x2, ..., x_N), +// then, g is +// (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N, +// dL/dy1, dL/dy2, ..., dL/dy_M), +// where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the +// loss function). dL/dx_i is the partial derivative of L with respect +// to x_i. +message GradientDef { + string function_name = 1; // The function name. + string gradient_func = 2; // The gradient function's name. +} diff --git a/executor/proto/tensorflow/core/framework/graph.pb.go b/executor/proto/tensorflow/core/framework/graph.pb.go new file mode 100644 index 0000000000..b8692c1506 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/graph.pb.go @@ -0,0 +1,149 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/graph.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Represents the graph of operations +type GraphDef struct { + Node []*NodeDef `protobuf:"bytes,1,rep,name=node,proto3" json:"node,omitempty"` + // Compatibility versions of the graph. See core/public/version.h for version + // history. The GraphDef version is distinct from the TensorFlow version, and + // each release of TensorFlow will support a range of GraphDef versions. + Versions *VersionDef `protobuf:"bytes,4,opt,name=versions,proto3" json:"versions,omitempty"` + // Deprecated single version field; use versions above instead. Since all + // GraphDef changes before "versions" was introduced were forward + // compatible, this field is entirely ignored. + Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` // Deprecated: Do not use. + // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET. + // + // "library" provides user-defined functions. + // + // Naming: + // * library.function.name are in a flat namespace. + // NOTE: We may need to change it to be hierarchical to support + // different orgs. E.g., + // { "/google/nn", { ... }}, + // { "/google/vision", { ... }} + // { "/org_foo/module_bar", { ... }} + // map named_lib; + // * If node[i].op is the name of one function in "library", + // node[i] is deemed as a function call. Otherwise, node[i].op + // must be a primitive operation supported by the runtime. + // + // + // Function call semantics: + // + // * The callee may start execution as soon as some of its inputs + // are ready. The caller may want to use Tuple() mechanism to + // ensure all inputs are ready in the same time. + // + // * The consumer of return values may start executing as soon as + // the return values the consumer depends on are ready. The + // consumer may want to use Tuple() mechanism to ensure the + // consumer does not start until all return values of the callee + // function are ready. 
+ Library *FunctionDefLibrary `protobuf:"bytes,2,opt,name=library,proto3" json:"library,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphDef) Reset() { *m = GraphDef{} } +func (m *GraphDef) String() string { return proto.CompactTextString(m) } +func (*GraphDef) ProtoMessage() {} +func (*GraphDef) Descriptor() ([]byte, []int) { + return fileDescriptor_c7b29295d3bc875a, []int{0} +} + +func (m *GraphDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphDef.Unmarshal(m, b) +} +func (m *GraphDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphDef.Marshal(b, m, deterministic) +} +func (m *GraphDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphDef.Merge(m, src) +} +func (m *GraphDef) XXX_Size() int { + return xxx_messageInfo_GraphDef.Size(m) +} +func (m *GraphDef) XXX_DiscardUnknown() { + xxx_messageInfo_GraphDef.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphDef proto.InternalMessageInfo + +func (m *GraphDef) GetNode() []*NodeDef { + if m != nil { + return m.Node + } + return nil +} + +func (m *GraphDef) GetVersions() *VersionDef { + if m != nil { + return m.Versions + } + return nil +} + +// Deprecated: Do not use. 
+func (m *GraphDef) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *GraphDef) GetLibrary() *FunctionDefLibrary { + if m != nil { + return m.Library + } + return nil +} + +func init() { + proto.RegisterType((*GraphDef)(nil), "tensorflow.GraphDef") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/graph.proto", fileDescriptor_c7b29295d3bc875a) +} + +var fileDescriptor_c7b29295d3bc875a = []byte{ + // 268 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x2d, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0xd7, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4f, 0x2b, 0x4a, 0xcc, + 0x4d, 0x2d, 0xcf, 0x2f, 0xca, 0xd6, 0x4f, 0x2f, 0x4a, 0x2c, 0xc8, 0xd0, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0xe2, 0x42, 0x28, 0x93, 0xd2, 0xc0, 0xad, 0x25, 0x2f, 0x3f, 0x25, 0x35, 0x3e, 0x25, + 0x35, 0x0d, 0xa2, 0x0b, 0x9f, 0xca, 0xb4, 0xd2, 0xbc, 0xe4, 0x92, 0xcc, 0xfc, 0x3c, 0xc2, 0x2a, + 0xcb, 0x52, 0x8b, 0x8a, 0x33, 0xf3, 0xf3, 0x8a, 0x21, 0x2a, 0x95, 0xf6, 0x33, 0x72, 0x71, 0xb8, + 0x83, 0x5c, 0xe6, 0x92, 0x9a, 0x26, 0xa4, 0xce, 0xc5, 0x02, 0xb2, 0x52, 0x82, 0x51, 0x81, 0x59, + 0x83, 0xdb, 0x48, 0x58, 0x0f, 0x61, 0x8a, 0x9e, 0x5f, 0x7e, 0x4a, 0xaa, 0x4b, 0x6a, 0x5a, 0x10, + 0x58, 0x81, 0x90, 0x11, 0x17, 0x07, 0xcc, 0x1c, 0x09, 0x16, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x31, + 0x64, 0xc5, 0x61, 0x10, 0x39, 0x90, 0x7a, 0xb8, 0x3a, 0x21, 0x19, 0x2e, 0x76, 0x28, 0x5b, 0x82, + 0x59, 0x81, 0x51, 0x83, 0xd5, 0x89, 0x49, 0x82, 0x31, 0x08, 0x26, 0x24, 0x64, 0xc1, 0xc5, 0x9e, + 0x93, 0x99, 0x54, 0x94, 0x58, 0x54, 0x29, 0xc1, 0x04, 0x36, 0x50, 0x0e, 0xd9, 0x40, 0x37, 0xa8, + 0xf7, 0x5c, 0x52, 0xd3, 0x7c, 0x20, 0xaa, 0x82, 0x60, 0xca, 0x9d, 0xb2, 0xb9, 0x24, 0xf2, 0x8b, + 0xd2, 0x91, 0x55, 0xc3, 0x3d, 0xeb, 0xc4, 0x0d, 0xf6, 0x5a, 0x00, 0xc8, 0xa7, 0xc5, 0x01, 0x8c, + 0x51, 0xb6, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x48, 0x21, 0x84, + 0x9d, 0x99, 0x9e, 0x8f, 
0x16, 0x74, 0x3f, 0x18, 0x19, 0x93, 0xd8, 0xc0, 0xa1, 0x66, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0x1f, 0x8e, 0x24, 0xc4, 0xe8, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/graph.proto b/executor/proto/tensorflow/core/framework/graph.proto new file mode 100644 index 0000000000..76d358971d --- /dev/null +++ b/executor/proto/tensorflow/core/framework/graph.proto @@ -0,0 +1,56 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "GraphProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/node_def.proto"; +import "tensorflow/core/framework/function.proto"; +import "tensorflow/core/framework/versions.proto"; + +// Represents the graph of operations +message GraphDef { + repeated NodeDef node = 1; + + // Compatibility versions of the graph. See core/public/version.h for version + // history. The GraphDef version is distinct from the TensorFlow version, and + // each release of TensorFlow will support a range of GraphDef versions. + VersionDef versions = 4; + + // Deprecated single version field; use versions above instead. Since all + // GraphDef changes before "versions" was introduced were forward + // compatible, this field is entirely ignored. + int32 version = 3 [deprecated = true]; + + // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET. + // + // "library" provides user-defined functions. + // + // Naming: + // * library.function.name are in a flat namespace. + // NOTE: We may need to change it to be hierarchical to support + // different orgs. E.g., + // { "/google/nn", { ... }}, + // { "/google/vision", { ... }} + // { "/org_foo/module_bar", { ... }} + // map named_lib; + // * If node[i].op is the name of one function in "library", + // node[i] is deemed as a function call. 
Otherwise, node[i].op + // must be a primitive operation supported by the runtime. + // + // + // Function call semantics: + // + // * The callee may start execution as soon as some of its inputs + // are ready. The caller may want to use Tuple() mechanism to + // ensure all inputs are ready in the same time. + // + // * The consumer of return values may start executing as soon as + // the return values the consumer depends on are ready. The + // consumer may want to use Tuple() mechanism to ensure the + // consumer does not start until all return values of the callee + // function are ready. + FunctionDefLibrary library = 2; +}; diff --git a/executor/proto/tensorflow/core/framework/graph_transfer_info.pb.go b/executor/proto/tensorflow/core/framework/graph_transfer_info.pb.go new file mode 100644 index 0000000000..b810f4f58a --- /dev/null +++ b/executor/proto/tensorflow/core/framework/graph_transfer_info.pb.go @@ -0,0 +1,609 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/graph_transfer_info.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type GraphTransferInfo_Destination int32 + +const ( + GraphTransferInfo_NOP GraphTransferInfo_Destination = 0 + GraphTransferInfo_HEXAGON GraphTransferInfo_Destination = 1 +) + +var GraphTransferInfo_Destination_name = map[int32]string{ + 0: "NOP", + 1: "HEXAGON", +} + +var GraphTransferInfo_Destination_value = map[string]int32{ + "NOP": 0, + "HEXAGON": 1, +} + +func (x GraphTransferInfo_Destination) String() string { + return proto.EnumName(GraphTransferInfo_Destination_name, int32(x)) +} + +func (GraphTransferInfo_Destination) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_c3a1e773f26c9475, []int{7, 0} +} + +type GraphTransferNodeInput struct { + NodeId int32 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + OutputPort int32 `protobuf:"varint,2,opt,name=output_port,json=outputPort,proto3" json:"output_port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphTransferNodeInput) Reset() { *m = GraphTransferNodeInput{} } +func (m *GraphTransferNodeInput) String() string { return proto.CompactTextString(m) } +func (*GraphTransferNodeInput) ProtoMessage() {} +func (*GraphTransferNodeInput) Descriptor() ([]byte, []int) { + return fileDescriptor_c3a1e773f26c9475, []int{0} +} + +func (m *GraphTransferNodeInput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphTransferNodeInput.Unmarshal(m, b) +} +func (m *GraphTransferNodeInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphTransferNodeInput.Marshal(b, m, deterministic) +} +func (m *GraphTransferNodeInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphTransferNodeInput.Merge(m, src) +} +func (m *GraphTransferNodeInput) XXX_Size() int { + return xxx_messageInfo_GraphTransferNodeInput.Size(m) +} +func (m *GraphTransferNodeInput) 
XXX_DiscardUnknown() { + xxx_messageInfo_GraphTransferNodeInput.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphTransferNodeInput proto.InternalMessageInfo + +func (m *GraphTransferNodeInput) GetNodeId() int32 { + if m != nil { + return m.NodeId + } + return 0 +} + +func (m *GraphTransferNodeInput) GetOutputPort() int32 { + if m != nil { + return m.OutputPort + } + return 0 +} + +type GraphTransferNodeInfo struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NodeId int32 `protobuf:"varint,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + TypeName string `protobuf:"bytes,3,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + SocOpId int32 `protobuf:"varint,4,opt,name=soc_op_id,json=socOpId,proto3" json:"soc_op_id,omitempty"` + PaddingId int32 `protobuf:"varint,5,opt,name=padding_id,json=paddingId,proto3" json:"padding_id,omitempty"` + InputCount int32 `protobuf:"varint,6,opt,name=input_count,json=inputCount,proto3" json:"input_count,omitempty"` + OutputCount int32 `protobuf:"varint,7,opt,name=output_count,json=outputCount,proto3" json:"output_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphTransferNodeInfo) Reset() { *m = GraphTransferNodeInfo{} } +func (m *GraphTransferNodeInfo) String() string { return proto.CompactTextString(m) } +func (*GraphTransferNodeInfo) ProtoMessage() {} +func (*GraphTransferNodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c3a1e773f26c9475, []int{1} +} + +func (m *GraphTransferNodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphTransferNodeInfo.Unmarshal(m, b) +} +func (m *GraphTransferNodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphTransferNodeInfo.Marshal(b, m, deterministic) +} +func (m *GraphTransferNodeInfo) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_GraphTransferNodeInfo.Merge(m, src) +} +func (m *GraphTransferNodeInfo) XXX_Size() int { + return xxx_messageInfo_GraphTransferNodeInfo.Size(m) +} +func (m *GraphTransferNodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GraphTransferNodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphTransferNodeInfo proto.InternalMessageInfo + +func (m *GraphTransferNodeInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GraphTransferNodeInfo) GetNodeId() int32 { + if m != nil { + return m.NodeId + } + return 0 +} + +func (m *GraphTransferNodeInfo) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *GraphTransferNodeInfo) GetSocOpId() int32 { + if m != nil { + return m.SocOpId + } + return 0 +} + +func (m *GraphTransferNodeInfo) GetPaddingId() int32 { + if m != nil { + return m.PaddingId + } + return 0 +} + +func (m *GraphTransferNodeInfo) GetInputCount() int32 { + if m != nil { + return m.InputCount + } + return 0 +} + +func (m *GraphTransferNodeInfo) GetOutputCount() int32 { + if m != nil { + return m.OutputCount + } + return 0 +} + +type GraphTransferConstNodeInfo struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NodeId int32 `protobuf:"varint,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Shape []int64 `protobuf:"varint,3,rep,packed,name=shape,proto3" json:"shape,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + Dtype DataType `protobuf:"varint,5,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphTransferConstNodeInfo) Reset() { *m = GraphTransferConstNodeInfo{} } +func (m *GraphTransferConstNodeInfo) String() string { return proto.CompactTextString(m) } +func (*GraphTransferConstNodeInfo) ProtoMessage() {} +func 
(*GraphTransferConstNodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c3a1e773f26c9475, []int{2} +} + +func (m *GraphTransferConstNodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphTransferConstNodeInfo.Unmarshal(m, b) +} +func (m *GraphTransferConstNodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphTransferConstNodeInfo.Marshal(b, m, deterministic) +} +func (m *GraphTransferConstNodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphTransferConstNodeInfo.Merge(m, src) +} +func (m *GraphTransferConstNodeInfo) XXX_Size() int { + return xxx_messageInfo_GraphTransferConstNodeInfo.Size(m) +} +func (m *GraphTransferConstNodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GraphTransferConstNodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphTransferConstNodeInfo proto.InternalMessageInfo + +func (m *GraphTransferConstNodeInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GraphTransferConstNodeInfo) GetNodeId() int32 { + if m != nil { + return m.NodeId + } + return 0 +} + +func (m *GraphTransferConstNodeInfo) GetShape() []int64 { + if m != nil { + return m.Shape + } + return nil +} + +func (m *GraphTransferConstNodeInfo) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *GraphTransferConstNodeInfo) GetDtype() DataType { + if m != nil { + return m.Dtype + } + return DataType_DT_INVALID +} + +type GraphTransferNodeInputInfo struct { + NodeId int32 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeInput []*GraphTransferNodeInput `protobuf:"bytes,2,rep,name=node_input,json=nodeInput,proto3" json:"node_input,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphTransferNodeInputInfo) Reset() { *m = GraphTransferNodeInputInfo{} } +func (m *GraphTransferNodeInputInfo) String() string { 
return proto.CompactTextString(m) } +func (*GraphTransferNodeInputInfo) ProtoMessage() {} +func (*GraphTransferNodeInputInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c3a1e773f26c9475, []int{3} +} + +func (m *GraphTransferNodeInputInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphTransferNodeInputInfo.Unmarshal(m, b) +} +func (m *GraphTransferNodeInputInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphTransferNodeInputInfo.Marshal(b, m, deterministic) +} +func (m *GraphTransferNodeInputInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphTransferNodeInputInfo.Merge(m, src) +} +func (m *GraphTransferNodeInputInfo) XXX_Size() int { + return xxx_messageInfo_GraphTransferNodeInputInfo.Size(m) +} +func (m *GraphTransferNodeInputInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GraphTransferNodeInputInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphTransferNodeInputInfo proto.InternalMessageInfo + +func (m *GraphTransferNodeInputInfo) GetNodeId() int32 { + if m != nil { + return m.NodeId + } + return 0 +} + +func (m *GraphTransferNodeInputInfo) GetNodeInput() []*GraphTransferNodeInput { + if m != nil { + return m.NodeInput + } + return nil +} + +type GraphTransferNodeOutputInfo struct { + NodeId int32 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + MaxByteSize []int32 `protobuf:"varint,2,rep,packed,name=max_byte_size,json=maxByteSize,proto3" json:"max_byte_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphTransferNodeOutputInfo) Reset() { *m = GraphTransferNodeOutputInfo{} } +func (m *GraphTransferNodeOutputInfo) String() string { return proto.CompactTextString(m) } +func (*GraphTransferNodeOutputInfo) ProtoMessage() {} +func (*GraphTransferNodeOutputInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c3a1e773f26c9475, []int{4} +} + +func (m 
*GraphTransferNodeOutputInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphTransferNodeOutputInfo.Unmarshal(m, b) +} +func (m *GraphTransferNodeOutputInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphTransferNodeOutputInfo.Marshal(b, m, deterministic) +} +func (m *GraphTransferNodeOutputInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphTransferNodeOutputInfo.Merge(m, src) +} +func (m *GraphTransferNodeOutputInfo) XXX_Size() int { + return xxx_messageInfo_GraphTransferNodeOutputInfo.Size(m) +} +func (m *GraphTransferNodeOutputInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GraphTransferNodeOutputInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphTransferNodeOutputInfo proto.InternalMessageInfo + +func (m *GraphTransferNodeOutputInfo) GetNodeId() int32 { + if m != nil { + return m.NodeId + } + return 0 +} + +func (m *GraphTransferNodeOutputInfo) GetMaxByteSize() []int32 { + if m != nil { + return m.MaxByteSize + } + return nil +} + +type GraphTransferGraphInputNodeInfo struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Shape []int64 `protobuf:"varint,2,rep,packed,name=shape,proto3" json:"shape,omitempty"` + Dtype DataType `protobuf:"varint,3,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphTransferGraphInputNodeInfo) Reset() { *m = GraphTransferGraphInputNodeInfo{} } +func (m *GraphTransferGraphInputNodeInfo) String() string { return proto.CompactTextString(m) } +func (*GraphTransferGraphInputNodeInfo) ProtoMessage() {} +func (*GraphTransferGraphInputNodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c3a1e773f26c9475, []int{5} +} + +func (m *GraphTransferGraphInputNodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphTransferGraphInputNodeInfo.Unmarshal(m, b) +} +func (m 
*GraphTransferGraphInputNodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphTransferGraphInputNodeInfo.Marshal(b, m, deterministic) +} +func (m *GraphTransferGraphInputNodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphTransferGraphInputNodeInfo.Merge(m, src) +} +func (m *GraphTransferGraphInputNodeInfo) XXX_Size() int { + return xxx_messageInfo_GraphTransferGraphInputNodeInfo.Size(m) +} +func (m *GraphTransferGraphInputNodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GraphTransferGraphInputNodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphTransferGraphInputNodeInfo proto.InternalMessageInfo + +func (m *GraphTransferGraphInputNodeInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GraphTransferGraphInputNodeInfo) GetShape() []int64 { + if m != nil { + return m.Shape + } + return nil +} + +func (m *GraphTransferGraphInputNodeInfo) GetDtype() DataType { + if m != nil { + return m.Dtype + } + return DataType_DT_INVALID +} + +type GraphTransferGraphOutputNodeInfo struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Shape []int64 `protobuf:"varint,2,rep,packed,name=shape,proto3" json:"shape,omitempty"` + Dtype DataType `protobuf:"varint,3,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphTransferGraphOutputNodeInfo) Reset() { *m = GraphTransferGraphOutputNodeInfo{} } +func (m *GraphTransferGraphOutputNodeInfo) String() string { return proto.CompactTextString(m) } +func (*GraphTransferGraphOutputNodeInfo) ProtoMessage() {} +func (*GraphTransferGraphOutputNodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c3a1e773f26c9475, []int{6} +} + +func (m *GraphTransferGraphOutputNodeInfo) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_GraphTransferGraphOutputNodeInfo.Unmarshal(m, b) +} +func (m *GraphTransferGraphOutputNodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphTransferGraphOutputNodeInfo.Marshal(b, m, deterministic) +} +func (m *GraphTransferGraphOutputNodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphTransferGraphOutputNodeInfo.Merge(m, src) +} +func (m *GraphTransferGraphOutputNodeInfo) XXX_Size() int { + return xxx_messageInfo_GraphTransferGraphOutputNodeInfo.Size(m) +} +func (m *GraphTransferGraphOutputNodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GraphTransferGraphOutputNodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphTransferGraphOutputNodeInfo proto.InternalMessageInfo + +func (m *GraphTransferGraphOutputNodeInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GraphTransferGraphOutputNodeInfo) GetShape() []int64 { + if m != nil { + return m.Shape + } + return nil +} + +func (m *GraphTransferGraphOutputNodeInfo) GetDtype() DataType { + if m != nil { + return m.Dtype + } + return DataType_DT_INVALID +} + +// Protocol buffer representing a handle to a tensorflow resource. Handles are +// not valid across executions, but can be serialized back and forth from within +// a single run. 
+type GraphTransferInfo struct { + NodeInfo []*GraphTransferNodeInfo `protobuf:"bytes,1,rep,name=node_info,json=nodeInfo,proto3" json:"node_info,omitempty"` + ConstNodeInfo []*GraphTransferConstNodeInfo `protobuf:"bytes,2,rep,name=const_node_info,json=constNodeInfo,proto3" json:"const_node_info,omitempty"` + NodeInputInfo []*GraphTransferNodeInputInfo `protobuf:"bytes,3,rep,name=node_input_info,json=nodeInputInfo,proto3" json:"node_input_info,omitempty"` + NodeOutputInfo []*GraphTransferNodeOutputInfo `protobuf:"bytes,4,rep,name=node_output_info,json=nodeOutputInfo,proto3" json:"node_output_info,omitempty"` + // Input Node parameters of transferred graph + GraphInputNodeInfo []*GraphTransferGraphInputNodeInfo `protobuf:"bytes,5,rep,name=graph_input_node_info,json=graphInputNodeInfo,proto3" json:"graph_input_node_info,omitempty"` + GraphOutputNodeInfo []*GraphTransferGraphOutputNodeInfo `protobuf:"bytes,6,rep,name=graph_output_node_info,json=graphOutputNodeInfo,proto3" json:"graph_output_node_info,omitempty"` + // Destination of graph transfer + Destination GraphTransferInfo_Destination `protobuf:"varint,7,opt,name=destination,proto3,enum=tensorflow.GraphTransferInfo_Destination" json:"destination,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphTransferInfo) Reset() { *m = GraphTransferInfo{} } +func (m *GraphTransferInfo) String() string { return proto.CompactTextString(m) } +func (*GraphTransferInfo) ProtoMessage() {} +func (*GraphTransferInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c3a1e773f26c9475, []int{7} +} + +func (m *GraphTransferInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphTransferInfo.Unmarshal(m, b) +} +func (m *GraphTransferInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphTransferInfo.Marshal(b, m, deterministic) +} +func (m *GraphTransferInfo) XXX_Merge(src proto.Message) 
{ + xxx_messageInfo_GraphTransferInfo.Merge(m, src) +} +func (m *GraphTransferInfo) XXX_Size() int { + return xxx_messageInfo_GraphTransferInfo.Size(m) +} +func (m *GraphTransferInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GraphTransferInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphTransferInfo proto.InternalMessageInfo + +func (m *GraphTransferInfo) GetNodeInfo() []*GraphTransferNodeInfo { + if m != nil { + return m.NodeInfo + } + return nil +} + +func (m *GraphTransferInfo) GetConstNodeInfo() []*GraphTransferConstNodeInfo { + if m != nil { + return m.ConstNodeInfo + } + return nil +} + +func (m *GraphTransferInfo) GetNodeInputInfo() []*GraphTransferNodeInputInfo { + if m != nil { + return m.NodeInputInfo + } + return nil +} + +func (m *GraphTransferInfo) GetNodeOutputInfo() []*GraphTransferNodeOutputInfo { + if m != nil { + return m.NodeOutputInfo + } + return nil +} + +func (m *GraphTransferInfo) GetGraphInputNodeInfo() []*GraphTransferGraphInputNodeInfo { + if m != nil { + return m.GraphInputNodeInfo + } + return nil +} + +func (m *GraphTransferInfo) GetGraphOutputNodeInfo() []*GraphTransferGraphOutputNodeInfo { + if m != nil { + return m.GraphOutputNodeInfo + } + return nil +} + +func (m *GraphTransferInfo) GetDestination() GraphTransferInfo_Destination { + if m != nil { + return m.Destination + } + return GraphTransferInfo_NOP +} + +func init() { + proto.RegisterEnum("tensorflow.GraphTransferInfo_Destination", GraphTransferInfo_Destination_name, GraphTransferInfo_Destination_value) + proto.RegisterType((*GraphTransferNodeInput)(nil), "tensorflow.GraphTransferNodeInput") + proto.RegisterType((*GraphTransferNodeInfo)(nil), "tensorflow.GraphTransferNodeInfo") + proto.RegisterType((*GraphTransferConstNodeInfo)(nil), "tensorflow.GraphTransferConstNodeInfo") + proto.RegisterType((*GraphTransferNodeInputInfo)(nil), "tensorflow.GraphTransferNodeInputInfo") + proto.RegisterType((*GraphTransferNodeOutputInfo)(nil), "tensorflow.GraphTransferNodeOutputInfo") + 
proto.RegisterType((*GraphTransferGraphInputNodeInfo)(nil), "tensorflow.GraphTransferGraphInputNodeInfo") + proto.RegisterType((*GraphTransferGraphOutputNodeInfo)(nil), "tensorflow.GraphTransferGraphOutputNodeInfo") + proto.RegisterType((*GraphTransferInfo)(nil), "tensorflow.GraphTransferInfo") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/graph_transfer_info.proto", fileDescriptor_c3a1e773f26c9475) +} + +var fileDescriptor_c3a1e773f26c9475 = []byte{ + // 670 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x5b, 0x6f, 0xd3, 0x4c, + 0x10, 0xfd, 0x1c, 0xe7, 0xd2, 0x4c, 0x7a, 0xfb, 0x4c, 0x5b, 0xa2, 0x56, 0xa8, 0xad, 0x11, 0x50, + 0x2e, 0x4a, 0xa4, 0xf6, 0x19, 0xa4, 0x5e, 0x50, 0xa9, 0x90, 0x92, 0xb0, 0xf4, 0x01, 0xf5, 0x01, + 0x6b, 0x6b, 0xaf, 0x5d, 0x0b, 0xb2, 0x6b, 0xd9, 0x1b, 0x9a, 0xf4, 0xcf, 0xf0, 0xd7, 0xf8, 0x19, + 0x48, 0xbc, 0xa0, 0x9d, 0x75, 0x63, 0xbb, 0xb9, 0x14, 0x21, 0xf1, 0xb6, 0x97, 0x99, 0x73, 0xe6, + 0xcc, 0x1c, 0x7b, 0xe1, 0x40, 0x32, 0x9e, 0x88, 0xd8, 0xff, 0x2a, 0xae, 0xdb, 0xae, 0x88, 0x59, + 0xdb, 0x8f, 0x69, 0x9f, 0x5d, 0x8b, 0xf8, 0x4b, 0x3b, 0x88, 0x69, 0x74, 0xe5, 0xc8, 0x98, 0xf2, + 0xc4, 0x67, 0xb1, 0x13, 0x72, 0x5f, 0xb4, 0xa2, 0x58, 0x48, 0x61, 0x41, 0x96, 0xb4, 0xf9, 0x64, + 0x36, 0x80, 0x1c, 0x45, 0x2c, 0xd1, 0x29, 0x36, 0x81, 0x8d, 0x53, 0x85, 0x77, 0x9e, 0xc2, 0x75, + 0x84, 0xc7, 0xce, 0x78, 0x34, 0x90, 0xd6, 0x43, 0xa8, 0x71, 0xe1, 0x31, 0x27, 0xf4, 0x9a, 0xc6, + 0x8e, 0xb1, 0x57, 0x21, 0x55, 0xb5, 0x3d, 0xf3, 0xac, 0x6d, 0x68, 0x88, 0x81, 0x8c, 0x06, 0xd2, + 0x89, 0x44, 0x2c, 0x9b, 0x25, 0xbc, 0x04, 0x7d, 0xd4, 0x13, 0xb1, 0xb4, 0x7f, 0x18, 0xb0, 0x3e, + 0x05, 0xd4, 0x17, 0x96, 0x05, 0x65, 0x4e, 0xfb, 0x0c, 0x01, 0xeb, 0x04, 0xd7, 0x79, 0x9e, 0x52, + 0x81, 0x67, 0x0b, 0xea, 0xaa, 0x52, 0x07, 0x33, 0x4c, 0xcc, 0x58, 0x50, 0x07, 0x1d, 0x95, 0xb5, + 0x09, 0xf5, 0x44, 0xb8, 0x8e, 0x88, 0x54, 0x5e, 0x19, 0xf3, 0x6a, 0x89, 0x70, 0xbb, 0xd1, 0x99, + 
0x67, 0x3d, 0x02, 0x88, 0xa8, 0xe7, 0x85, 0x3c, 0x50, 0x97, 0x15, 0xbc, 0xac, 0xa7, 0x27, 0xba, + 0xfe, 0x50, 0x29, 0x74, 0x5c, 0x31, 0xe0, 0xb2, 0x59, 0xd5, 0xf5, 0xe3, 0xd1, 0xb1, 0x3a, 0xb1, + 0x76, 0x61, 0x31, 0x15, 0xa8, 0x23, 0x6a, 0x18, 0x91, 0x8a, 0xc6, 0x10, 0xfb, 0xbb, 0x01, 0x9b, + 0x05, 0x89, 0xc7, 0x82, 0x27, 0xf2, 0xef, 0x74, 0xae, 0x41, 0x25, 0xb9, 0xa2, 0x91, 0xd2, 0x68, + 0xee, 0x99, 0x44, 0x6f, 0x14, 0x84, 0x47, 0x25, 0x45, 0x6d, 0x8b, 0x04, 0xd7, 0xd6, 0x0b, 0xa8, + 0x78, 0xaa, 0x03, 0xa8, 0x69, 0x79, 0x7f, 0xad, 0x95, 0xcd, 0xb8, 0x75, 0x42, 0x25, 0x3d, 0x1f, + 0x45, 0x8c, 0xe8, 0x10, 0x7b, 0x78, 0xa7, 0xc0, 0xf1, 0x60, 0xb1, 0xc0, 0x99, 0xc3, 0x3d, 0x04, + 0xd0, 0x17, 0x2a, 0xb4, 0x59, 0xda, 0x31, 0xf7, 0x1a, 0xfb, 0x76, 0x9e, 0x67, 0x3a, 0x28, 0xa9, + 0xf3, 0xdb, 0xa5, 0x7d, 0x01, 0x5b, 0x13, 0x41, 0x5d, 0xec, 0xdd, 0x7c, 0x6a, 0x1b, 0x96, 0xfa, + 0x74, 0xe8, 0x5c, 0x8e, 0x24, 0x73, 0x92, 0xf0, 0x86, 0x21, 0x7b, 0x85, 0x34, 0xfa, 0x74, 0x78, + 0x34, 0x92, 0xec, 0x63, 0x78, 0xc3, 0xec, 0x6b, 0xd8, 0x2e, 0x60, 0xe3, 0x06, 0x69, 0xe7, 0xf6, + 0x7e, 0xdc, 0xe2, 0x52, 0xbe, 0xc5, 0xe3, 0x76, 0x9a, 0x7f, 0xd2, 0xce, 0x9d, 0x49, 0x62, 0xad, + 0xea, 0x1f, 0x33, 0xff, 0x2a, 0xc3, 0xff, 0x05, 0x6a, 0xe4, 0x7a, 0x03, 0xf5, 0x74, 0x4e, 0xbe, + 0x68, 0x1a, 0x38, 0xa6, 0xdd, 0x7b, 0xc6, 0xe4, 0x0b, 0xb2, 0xc0, 0x6f, 0x6b, 0xed, 0xc0, 0x8a, + 0xab, 0x2c, 0xeb, 0x64, 0x28, 0x7a, 0xd8, 0x4f, 0x67, 0xa2, 0x14, 0x2c, 0x4e, 0x96, 0xdc, 0x82, + 0xe3, 0x3b, 0xb0, 0x92, 0xf9, 0x46, 0xe3, 0x99, 0xf7, 0xe0, 0x15, 0x1c, 0x49, 0x96, 0x78, 0xc1, + 0xa0, 0x1f, 0x60, 0x15, 0xf1, 0xd2, 0x0f, 0x11, 0x01, 0xcb, 0x08, 0xf8, 0x6c, 0x2e, 0x60, 0x66, + 0x34, 0xb2, 0xcc, 0x8b, 0xc6, 0xfb, 0x0c, 0xeb, 0xfa, 0xd7, 0xa9, 0x6b, 0xcc, 0x84, 0x57, 0x10, + 0xf7, 0xe5, 0x4c, 0xdc, 0x49, 0x93, 0x11, 0x2b, 0x98, 0x34, 0x1e, 0x85, 0x0d, 0x8d, 0x9f, 0xd6, + 0x9c, 0x11, 0x54, 0x91, 0xe0, 0xd5, 0x7c, 0x82, 0xa2, 0x99, 0xc8, 0x83, 0x60, 0x8a, 0xc3, 0xde, + 0x43, 0xc3, 0x63, 0x89, 0x0c, 0x39, 
0x95, 0xa1, 0xe0, 0xf8, 0x63, 0x5a, 0xde, 0x7f, 0x3e, 0x13, + 0x57, 0xe5, 0xb4, 0x4e, 0xb2, 0x04, 0x92, 0xcf, 0xb6, 0x1f, 0x43, 0x23, 0x77, 0x67, 0xd5, 0xc0, + 0xec, 0x74, 0x7b, 0xab, 0xff, 0x59, 0x0d, 0xa8, 0xbd, 0x7b, 0xfb, 0xe9, 0xf0, 0xb4, 0xdb, 0x59, + 0x35, 0x8e, 0xbe, 0x41, 0x53, 0xc4, 0x41, 0x9e, 0x61, 0xfc, 0x8e, 0x1c, 0x6d, 0x4c, 0x90, 0xf5, + 0xd4, 0x9b, 0xd2, 0x33, 0x2e, 0x5e, 0x07, 0xa1, 0xbc, 0x1a, 0x5c, 0xb6, 0x5c, 0xd1, 0x6f, 0xe7, + 0xde, 0xa1, 0xe9, 0xcb, 0x40, 0xdc, 0x79, 0xa0, 0x7e, 0x1a, 0xc6, 0x65, 0x15, 0x9f, 0xa7, 0x83, + 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7a, 0x13, 0x2f, 0x8f, 0x08, 0x07, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/graph_transfer_info.proto b/executor/proto/tensorflow/core/framework/graph_transfer_info.proto new file mode 100644 index 0000000000..232297d460 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/graph_transfer_info.proto @@ -0,0 +1,69 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "GraphTransferInfoProto"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/types.proto"; + +message GraphTransferNodeInput { + int32 node_id = 1; + int32 output_port = 2; +} +message GraphTransferNodeInfo { + string name = 1; + int32 node_id = 2; + string type_name = 3; + int32 soc_op_id = 4; + int32 padding_id = 5; + int32 input_count = 6; + int32 output_count = 7; +}; +message GraphTransferConstNodeInfo { + string name = 1; + int32 node_id = 2; + repeated int64 shape = 3; + bytes data = 4; + DataType dtype = 5; +}; +message GraphTransferNodeInputInfo { + int32 node_id = 1; + repeated GraphTransferNodeInput node_input = 2; +}; +message GraphTransferNodeOutputInfo { + int32 node_id = 1; + repeated int32 max_byte_size = 2; +}; +message GraphTransferGraphInputNodeInfo { + string 
name = 1; + repeated int64 shape = 2; + DataType dtype = 3; +} + +message GraphTransferGraphOutputNodeInfo { + string name = 1; + repeated int64 shape = 2; + DataType dtype = 3; +} + +// Protocol buffer representing a handle to a tensorflow resource. Handles are +// not valid across executions, but can be serialized back and forth from within +// a single run. +message GraphTransferInfo { + enum Destination { + NOP = 0; + HEXAGON = 1; + } + + repeated GraphTransferNodeInfo node_info = 1; + repeated GraphTransferConstNodeInfo const_node_info = 2; + repeated GraphTransferNodeInputInfo node_input_info = 3; + repeated GraphTransferNodeOutputInfo node_output_info = 4; + // Input Node parameters of transferred graph + repeated GraphTransferGraphInputNodeInfo graph_input_node_info = 5; + repeated GraphTransferGraphOutputNodeInfo graph_output_node_info = 6; + // Destination of graph transfer + Destination destination = 7; +}; diff --git a/executor/proto/tensorflow/core/framework/kernel_def.pb.go b/executor/proto/tensorflow/core/framework/kernel_def.pb.go new file mode 100644 index 0000000000..b93ea4c58a --- /dev/null +++ b/executor/proto/tensorflow/core/framework/kernel_def.pb.go @@ -0,0 +1,237 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/kernel_def.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type KernelDef struct { + // Must match the name of an Op. 
+ Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` + // Type of device this kernel runs on. + DeviceType string `protobuf:"bytes,2,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"` + Constraint []*KernelDef_AttrConstraint `protobuf:"bytes,3,rep,name=constraint,proto3" json:"constraint,omitempty"` + // Names of the Op's input_/output_args that reside in host memory + // instead of device memory. + HostMemoryArg []string `protobuf:"bytes,4,rep,name=host_memory_arg,json=hostMemoryArg,proto3" json:"host_memory_arg,omitempty"` + // This allows experimental kernels to be registered for an op that + // won't be used unless the user specifies a "_kernel" attr with + // value matching this. + Label string `protobuf:"bytes,5,opt,name=label,proto3" json:"label,omitempty"` + // Prioritization of kernel amongst different devices. By default we assume + // priority is 0. The higher the priority the better. By default (i.e. if + // this is not set), we prefer GPU kernels over CPU. 
+ Priority int32 `protobuf:"varint,6,opt,name=priority,proto3" json:"priority,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KernelDef) Reset() { *m = KernelDef{} } +func (m *KernelDef) String() string { return proto.CompactTextString(m) } +func (*KernelDef) ProtoMessage() {} +func (*KernelDef) Descriptor() ([]byte, []int) { + return fileDescriptor_18794e085ea7671a, []int{0} +} + +func (m *KernelDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KernelDef.Unmarshal(m, b) +} +func (m *KernelDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KernelDef.Marshal(b, m, deterministic) +} +func (m *KernelDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_KernelDef.Merge(m, src) +} +func (m *KernelDef) XXX_Size() int { + return xxx_messageInfo_KernelDef.Size(m) +} +func (m *KernelDef) XXX_DiscardUnknown() { + xxx_messageInfo_KernelDef.DiscardUnknown(m) +} + +var xxx_messageInfo_KernelDef proto.InternalMessageInfo + +func (m *KernelDef) GetOp() string { + if m != nil { + return m.Op + } + return "" +} + +func (m *KernelDef) GetDeviceType() string { + if m != nil { + return m.DeviceType + } + return "" +} + +func (m *KernelDef) GetConstraint() []*KernelDef_AttrConstraint { + if m != nil { + return m.Constraint + } + return nil +} + +func (m *KernelDef) GetHostMemoryArg() []string { + if m != nil { + return m.HostMemoryArg + } + return nil +} + +func (m *KernelDef) GetLabel() string { + if m != nil { + return m.Label + } + return "" +} + +func (m *KernelDef) GetPriority() int32 { + if m != nil { + return m.Priority + } + return 0 +} + +type KernelDef_AttrConstraint struct { + // Name of an attr from the Op. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A list of values that this kernel supports for this attr. + // Like OpDef.AttrDef.allowed_values, except for kernels instead of Ops. 
+ AllowedValues *AttrValue `protobuf:"bytes,2,opt,name=allowed_values,json=allowedValues,proto3" json:"allowed_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KernelDef_AttrConstraint) Reset() { *m = KernelDef_AttrConstraint{} } +func (m *KernelDef_AttrConstraint) String() string { return proto.CompactTextString(m) } +func (*KernelDef_AttrConstraint) ProtoMessage() {} +func (*KernelDef_AttrConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_18794e085ea7671a, []int{0, 0} +} + +func (m *KernelDef_AttrConstraint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KernelDef_AttrConstraint.Unmarshal(m, b) +} +func (m *KernelDef_AttrConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KernelDef_AttrConstraint.Marshal(b, m, deterministic) +} +func (m *KernelDef_AttrConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_KernelDef_AttrConstraint.Merge(m, src) +} +func (m *KernelDef_AttrConstraint) XXX_Size() int { + return xxx_messageInfo_KernelDef_AttrConstraint.Size(m) +} +func (m *KernelDef_AttrConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_KernelDef_AttrConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_KernelDef_AttrConstraint proto.InternalMessageInfo + +func (m *KernelDef_AttrConstraint) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *KernelDef_AttrConstraint) GetAllowedValues() *AttrValue { + if m != nil { + return m.AllowedValues + } + return nil +} + +// A collection of KernelDefs +type KernelList struct { + Kernel []*KernelDef `protobuf:"bytes,1,rep,name=kernel,proto3" json:"kernel,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KernelList) Reset() { *m = KernelList{} } +func (m *KernelList) String() string { return proto.CompactTextString(m) } +func 
(*KernelList) ProtoMessage() {} +func (*KernelList) Descriptor() ([]byte, []int) { + return fileDescriptor_18794e085ea7671a, []int{1} +} + +func (m *KernelList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KernelList.Unmarshal(m, b) +} +func (m *KernelList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KernelList.Marshal(b, m, deterministic) +} +func (m *KernelList) XXX_Merge(src proto.Message) { + xxx_messageInfo_KernelList.Merge(m, src) +} +func (m *KernelList) XXX_Size() int { + return xxx_messageInfo_KernelList.Size(m) +} +func (m *KernelList) XXX_DiscardUnknown() { + xxx_messageInfo_KernelList.DiscardUnknown(m) +} + +var xxx_messageInfo_KernelList proto.InternalMessageInfo + +func (m *KernelList) GetKernel() []*KernelDef { + if m != nil { + return m.Kernel + } + return nil +} + +func init() { + proto.RegisterType((*KernelDef)(nil), "tensorflow.KernelDef") + proto.RegisterType((*KernelDef_AttrConstraint)(nil), "tensorflow.KernelDef.AttrConstraint") + proto.RegisterType((*KernelList)(nil), "tensorflow.KernelList") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/kernel_def.proto", fileDescriptor_18794e085ea7671a) +} + +var fileDescriptor_18794e085ea7671a = []byte{ + // 360 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xcf, 0x8a, 0xdb, 0x40, + 0x0c, 0xc6, 0xb1, 0xf3, 0x87, 0x46, 0x21, 0x09, 0x0c, 0x0d, 0x0c, 0xb9, 0xd4, 0x84, 0x52, 0x4c, + 0xa1, 0x36, 0xa4, 0xc7, 0xb6, 0x87, 0xa4, 0xb9, 0xb5, 0x85, 0x60, 0xca, 0x1e, 0xf6, 0x62, 0xc6, + 0x8e, 0xec, 0x98, 0xd8, 0x96, 0x19, 0x4f, 0x12, 0xfc, 0x76, 0xfb, 0x58, 0x7b, 0x5c, 0x3c, 0x0e, + 0x8e, 0x77, 0x09, 0x7b, 0x1b, 0x49, 0x9f, 0xa4, 0xf9, 0xf4, 0x83, 0xaf, 0x0a, 0xf3, 0x92, 0x64, + 0x94, 0xd2, 0xc5, 0x0d, 0x49, 0xa2, 0x1b, 0x49, 0x91, 0xe1, 0x85, 0xe4, 0xd1, 0x3d, 0xa2, 0xcc, + 0x31, 0xf5, 0xf7, 0x18, 0x39, 0x85, 0x24, 0x45, 0x0c, 0x6e, 0xda, 0xc5, 0x3b, 0x7d, 0x42, 0x29, + 0xe9, 
0x9f, 0x45, 0x7a, 0xc2, 0xa6, 0x6f, 0xf9, 0x64, 0xc2, 0xe8, 0x8f, 0x1e, 0xb6, 0xc5, 0x88, + 0x4d, 0xc1, 0xa4, 0x82, 0x1b, 0x96, 0x61, 0x8f, 0x3c, 0x93, 0x0a, 0xf6, 0x09, 0xc6, 0x7b, 0x3c, + 0x27, 0x21, 0xfa, 0xaa, 0x2a, 0x90, 0x9b, 0xba, 0x00, 0x4d, 0xea, 0x7f, 0x55, 0x20, 0xdb, 0x02, + 0x84, 0x94, 0x97, 0x4a, 0x8a, 0x24, 0x57, 0xbc, 0x67, 0xf5, 0xec, 0xf1, 0xea, 0xb3, 0x73, 0xdb, + 0xef, 0xb4, 0xb3, 0x9d, 0xb5, 0x52, 0xf2, 0x77, 0xab, 0xf5, 0x3a, 0x7d, 0xec, 0x0b, 0xcc, 0x0e, + 0x54, 0x2a, 0x3f, 0xc3, 0x8c, 0x64, 0xe5, 0x0b, 0x19, 0xf3, 0xbe, 0xd5, 0xb3, 0x47, 0xde, 0xa4, + 0x4e, 0xff, 0xd3, 0xd9, 0xb5, 0x8c, 0xd9, 0x47, 0x18, 0xa4, 0x22, 0xc0, 0x94, 0x0f, 0xf4, 0x47, + 0x9a, 0x80, 0x2d, 0xe0, 0x43, 0x21, 0x13, 0x92, 0x89, 0xaa, 0xf8, 0xd0, 0x32, 0xec, 0x81, 0xd7, + 0xc6, 0x8b, 0x00, 0xa6, 0xaf, 0xf7, 0x32, 0x06, 0xfd, 0x5c, 0x64, 0x78, 0x35, 0xa9, 0xdf, 0xec, + 0x27, 0x4c, 0x45, 0x9a, 0xd2, 0x05, 0xf7, 0xcd, 0x6d, 0x4a, 0xed, 0x74, 0xbc, 0x9a, 0x77, 0x9d, + 0xd4, 0x73, 0x1e, 0xea, 0xaa, 0x37, 0xb9, 0x8a, 0x75, 0x54, 0x2e, 0x7f, 0x00, 0x34, 0x2e, 0xff, + 0x26, 0xa5, 0x62, 0xdf, 0x60, 0xd8, 0xc0, 0xe1, 0x86, 0xbe, 0xc6, 0xfc, 0xee, 0x35, 0xbc, 0xab, + 0x68, 0x43, 0xc0, 0x49, 0xc6, 0x5d, 0x4d, 0x0b, 0x6b, 0x33, 0x6b, 0xe5, 0xbb, 0x9a, 0x55, 0xb9, + 0x33, 0x1e, 0x7f, 0xc5, 0x89, 0x3a, 0x9c, 0x02, 0x27, 0xa4, 0xcc, 0xed, 0x50, 0xbe, 0xff, 0x8c, + 0xe9, 0x0d, 0xfe, 0x67, 0xc3, 0x08, 0x86, 0x9a, 0xfb, 0xf7, 0x97, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x4e, 0x89, 0x26, 0xb6, 0x5d, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/kernel_def.proto b/executor/proto/tensorflow/core/framework/kernel_def.proto new file mode 100644 index 0000000000..358621dc0f --- /dev/null +++ b/executor/proto/tensorflow/core/framework/kernel_def.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "KernelDefProtos"; +option java_multiple_files = true; +option java_package = 
"org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/attr_value.proto"; + +message KernelDef { + // Must match the name of an Op. + string op = 1; + + // Type of device this kernel runs on. + string device_type = 2; + + message AttrConstraint { + // Name of an attr from the Op. + string name = 1; + + // A list of values that this kernel supports for this attr. + // Like OpDef.AttrDef.allowed_values, except for kernels instead of Ops. + AttrValue allowed_values = 2; + } + repeated AttrConstraint constraint = 3; + + // Names of the Op's input_/output_args that reside in host memory + // instead of device memory. + repeated string host_memory_arg = 4; + + // This allows experimental kernels to be registered for an op that + // won't be used unless the user specifies a "_kernel" attr with + // value matching this. + string label = 5; + + // Prioritization of kernel amongst different devices. By default we assume + // priority is 0. The higher the priority the better. By default (i.e. if + // this is not set), we prefer GPU kernels over CPU. + int32 priority = 6; +} + +// A collection of KernelDefs +message KernelList { + repeated KernelDef kernel = 1; +}; diff --git a/executor/proto/tensorflow/core/framework/log_memory.pb.go b/executor/proto/tensorflow/core/framework/log_memory.pb.go new file mode 100644 index 0000000000..3ae9825556 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/log_memory.pb.go @@ -0,0 +1,456 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/log_memory.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type MemoryLogStep struct { + // Process-unique step id. + StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"` + // Handle describing the feeds and fetches of the step. + Handle string `protobuf:"bytes,2,opt,name=handle,proto3" json:"handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryLogStep) Reset() { *m = MemoryLogStep{} } +func (m *MemoryLogStep) String() string { return proto.CompactTextString(m) } +func (*MemoryLogStep) ProtoMessage() {} +func (*MemoryLogStep) Descriptor() ([]byte, []int) { + return fileDescriptor_4f52e83a3ef81427, []int{0} +} + +func (m *MemoryLogStep) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemoryLogStep.Unmarshal(m, b) +} +func (m *MemoryLogStep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemoryLogStep.Marshal(b, m, deterministic) +} +func (m *MemoryLogStep) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryLogStep.Merge(m, src) +} +func (m *MemoryLogStep) XXX_Size() int { + return xxx_messageInfo_MemoryLogStep.Size(m) +} +func (m *MemoryLogStep) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryLogStep.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryLogStep proto.InternalMessageInfo + +func (m *MemoryLogStep) GetStepId() int64 { + if m != nil { + return m.StepId + } + return 0 +} + +func (m *MemoryLogStep) GetHandle() string { + if m != nil { + return m.Handle + } + return "" +} + +type MemoryLogTensorAllocation struct { + // Process-unique step id. 
+ StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"` + // Name of the kernel making the allocation as set in GraphDef, + // e.g., "affine2/weights/Assign". + KernelName string `protobuf:"bytes,2,opt,name=kernel_name,json=kernelName,proto3" json:"kernel_name,omitempty"` + // Allocated tensor details. + Tensor *TensorDescription `protobuf:"bytes,3,opt,name=tensor,proto3" json:"tensor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryLogTensorAllocation) Reset() { *m = MemoryLogTensorAllocation{} } +func (m *MemoryLogTensorAllocation) String() string { return proto.CompactTextString(m) } +func (*MemoryLogTensorAllocation) ProtoMessage() {} +func (*MemoryLogTensorAllocation) Descriptor() ([]byte, []int) { + return fileDescriptor_4f52e83a3ef81427, []int{1} +} + +func (m *MemoryLogTensorAllocation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemoryLogTensorAllocation.Unmarshal(m, b) +} +func (m *MemoryLogTensorAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemoryLogTensorAllocation.Marshal(b, m, deterministic) +} +func (m *MemoryLogTensorAllocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryLogTensorAllocation.Merge(m, src) +} +func (m *MemoryLogTensorAllocation) XXX_Size() int { + return xxx_messageInfo_MemoryLogTensorAllocation.Size(m) +} +func (m *MemoryLogTensorAllocation) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryLogTensorAllocation.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryLogTensorAllocation proto.InternalMessageInfo + +func (m *MemoryLogTensorAllocation) GetStepId() int64 { + if m != nil { + return m.StepId + } + return 0 +} + +func (m *MemoryLogTensorAllocation) GetKernelName() string { + if m != nil { + return m.KernelName + } + return "" +} + +func (m *MemoryLogTensorAllocation) GetTensor() *TensorDescription { + if m != nil { + 
return m.Tensor + } + return nil +} + +type MemoryLogTensorDeallocation struct { + // Id of the tensor buffer being deallocated, used to match to a + // corresponding allocation. + AllocationId int64 `protobuf:"varint,1,opt,name=allocation_id,json=allocationId,proto3" json:"allocation_id,omitempty"` + // Name of the allocator used. + AllocatorName string `protobuf:"bytes,2,opt,name=allocator_name,json=allocatorName,proto3" json:"allocator_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryLogTensorDeallocation) Reset() { *m = MemoryLogTensorDeallocation{} } +func (m *MemoryLogTensorDeallocation) String() string { return proto.CompactTextString(m) } +func (*MemoryLogTensorDeallocation) ProtoMessage() {} +func (*MemoryLogTensorDeallocation) Descriptor() ([]byte, []int) { + return fileDescriptor_4f52e83a3ef81427, []int{2} +} + +func (m *MemoryLogTensorDeallocation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemoryLogTensorDeallocation.Unmarshal(m, b) +} +func (m *MemoryLogTensorDeallocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemoryLogTensorDeallocation.Marshal(b, m, deterministic) +} +func (m *MemoryLogTensorDeallocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryLogTensorDeallocation.Merge(m, src) +} +func (m *MemoryLogTensorDeallocation) XXX_Size() int { + return xxx_messageInfo_MemoryLogTensorDeallocation.Size(m) +} +func (m *MemoryLogTensorDeallocation) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryLogTensorDeallocation.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryLogTensorDeallocation proto.InternalMessageInfo + +func (m *MemoryLogTensorDeallocation) GetAllocationId() int64 { + if m != nil { + return m.AllocationId + } + return 0 +} + +func (m *MemoryLogTensorDeallocation) GetAllocatorName() string { + if m != nil { + return m.AllocatorName + } + return "" +} + +type 
MemoryLogTensorOutput struct { + // Process-unique step id. + StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"` + // Name of the kernel producing an output as set in GraphDef, e.g., + // "affine2/weights/Assign". + KernelName string `protobuf:"bytes,2,opt,name=kernel_name,json=kernelName,proto3" json:"kernel_name,omitempty"` + // Index of the output being set. + Index int32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` + // Output tensor details. + Tensor *TensorDescription `protobuf:"bytes,4,opt,name=tensor,proto3" json:"tensor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryLogTensorOutput) Reset() { *m = MemoryLogTensorOutput{} } +func (m *MemoryLogTensorOutput) String() string { return proto.CompactTextString(m) } +func (*MemoryLogTensorOutput) ProtoMessage() {} +func (*MemoryLogTensorOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_4f52e83a3ef81427, []int{3} +} + +func (m *MemoryLogTensorOutput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemoryLogTensorOutput.Unmarshal(m, b) +} +func (m *MemoryLogTensorOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemoryLogTensorOutput.Marshal(b, m, deterministic) +} +func (m *MemoryLogTensorOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryLogTensorOutput.Merge(m, src) +} +func (m *MemoryLogTensorOutput) XXX_Size() int { + return xxx_messageInfo_MemoryLogTensorOutput.Size(m) +} +func (m *MemoryLogTensorOutput) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryLogTensorOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryLogTensorOutput proto.InternalMessageInfo + +func (m *MemoryLogTensorOutput) GetStepId() int64 { + if m != nil { + return m.StepId + } + return 0 +} + +func (m *MemoryLogTensorOutput) GetKernelName() string { + if m != nil { + return m.KernelName + } + return 
"" +} + +func (m *MemoryLogTensorOutput) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *MemoryLogTensorOutput) GetTensor() *TensorDescription { + if m != nil { + return m.Tensor + } + return nil +} + +type MemoryLogRawAllocation struct { + // Process-unique step id. + StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"` + // Name of the operation making the allocation. + Operation string `protobuf:"bytes,2,opt,name=operation,proto3" json:"operation,omitempty"` + // Number of bytes in the allocation. + NumBytes int64 `protobuf:"varint,3,opt,name=num_bytes,json=numBytes,proto3" json:"num_bytes,omitempty"` + // Address of the allocation. + Ptr uint64 `protobuf:"varint,4,opt,name=ptr,proto3" json:"ptr,omitempty"` + // Id of the tensor buffer being allocated, used to match to a + // corresponding deallocation. + AllocationId int64 `protobuf:"varint,5,opt,name=allocation_id,json=allocationId,proto3" json:"allocation_id,omitempty"` + // Name of the allocator used. 
+ AllocatorName string `protobuf:"bytes,6,opt,name=allocator_name,json=allocatorName,proto3" json:"allocator_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryLogRawAllocation) Reset() { *m = MemoryLogRawAllocation{} } +func (m *MemoryLogRawAllocation) String() string { return proto.CompactTextString(m) } +func (*MemoryLogRawAllocation) ProtoMessage() {} +func (*MemoryLogRawAllocation) Descriptor() ([]byte, []int) { + return fileDescriptor_4f52e83a3ef81427, []int{4} +} + +func (m *MemoryLogRawAllocation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemoryLogRawAllocation.Unmarshal(m, b) +} +func (m *MemoryLogRawAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemoryLogRawAllocation.Marshal(b, m, deterministic) +} +func (m *MemoryLogRawAllocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryLogRawAllocation.Merge(m, src) +} +func (m *MemoryLogRawAllocation) XXX_Size() int { + return xxx_messageInfo_MemoryLogRawAllocation.Size(m) +} +func (m *MemoryLogRawAllocation) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryLogRawAllocation.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryLogRawAllocation proto.InternalMessageInfo + +func (m *MemoryLogRawAllocation) GetStepId() int64 { + if m != nil { + return m.StepId + } + return 0 +} + +func (m *MemoryLogRawAllocation) GetOperation() string { + if m != nil { + return m.Operation + } + return "" +} + +func (m *MemoryLogRawAllocation) GetNumBytes() int64 { + if m != nil { + return m.NumBytes + } + return 0 +} + +func (m *MemoryLogRawAllocation) GetPtr() uint64 { + if m != nil { + return m.Ptr + } + return 0 +} + +func (m *MemoryLogRawAllocation) GetAllocationId() int64 { + if m != nil { + return m.AllocationId + } + return 0 +} + +func (m *MemoryLogRawAllocation) GetAllocatorName() string { + if m != nil { + return m.AllocatorName + } + return "" +} + +type 
MemoryLogRawDeallocation struct { + // Process-unique step id. + StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"` + // Name of the operation making the deallocation. + Operation string `protobuf:"bytes,2,opt,name=operation,proto3" json:"operation,omitempty"` + // Id of the tensor buffer being deallocated, used to match to a + // corresponding allocation. + AllocationId int64 `protobuf:"varint,3,opt,name=allocation_id,json=allocationId,proto3" json:"allocation_id,omitempty"` + // Name of the allocator used. + AllocatorName string `protobuf:"bytes,4,opt,name=allocator_name,json=allocatorName,proto3" json:"allocator_name,omitempty"` + // True if the deallocation is queued and will be performed later, + // e.g. for GPU lazy freeing of buffers. + Deferred bool `protobuf:"varint,5,opt,name=deferred,proto3" json:"deferred,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryLogRawDeallocation) Reset() { *m = MemoryLogRawDeallocation{} } +func (m *MemoryLogRawDeallocation) String() string { return proto.CompactTextString(m) } +func (*MemoryLogRawDeallocation) ProtoMessage() {} +func (*MemoryLogRawDeallocation) Descriptor() ([]byte, []int) { + return fileDescriptor_4f52e83a3ef81427, []int{5} +} + +func (m *MemoryLogRawDeallocation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemoryLogRawDeallocation.Unmarshal(m, b) +} +func (m *MemoryLogRawDeallocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemoryLogRawDeallocation.Marshal(b, m, deterministic) +} +func (m *MemoryLogRawDeallocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryLogRawDeallocation.Merge(m, src) +} +func (m *MemoryLogRawDeallocation) XXX_Size() int { + return xxx_messageInfo_MemoryLogRawDeallocation.Size(m) +} +func (m *MemoryLogRawDeallocation) XXX_DiscardUnknown() { + 
xxx_messageInfo_MemoryLogRawDeallocation.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryLogRawDeallocation proto.InternalMessageInfo + +func (m *MemoryLogRawDeallocation) GetStepId() int64 { + if m != nil { + return m.StepId + } + return 0 +} + +func (m *MemoryLogRawDeallocation) GetOperation() string { + if m != nil { + return m.Operation + } + return "" +} + +func (m *MemoryLogRawDeallocation) GetAllocationId() int64 { + if m != nil { + return m.AllocationId + } + return 0 +} + +func (m *MemoryLogRawDeallocation) GetAllocatorName() string { + if m != nil { + return m.AllocatorName + } + return "" +} + +func (m *MemoryLogRawDeallocation) GetDeferred() bool { + if m != nil { + return m.Deferred + } + return false +} + +func init() { + proto.RegisterType((*MemoryLogStep)(nil), "tensorflow.MemoryLogStep") + proto.RegisterType((*MemoryLogTensorAllocation)(nil), "tensorflow.MemoryLogTensorAllocation") + proto.RegisterType((*MemoryLogTensorDeallocation)(nil), "tensorflow.MemoryLogTensorDeallocation") + proto.RegisterType((*MemoryLogTensorOutput)(nil), "tensorflow.MemoryLogTensorOutput") + proto.RegisterType((*MemoryLogRawAllocation)(nil), "tensorflow.MemoryLogRawAllocation") + proto.RegisterType((*MemoryLogRawDeallocation)(nil), "tensorflow.MemoryLogRawDeallocation") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/log_memory.proto", fileDescriptor_4f52e83a3ef81427) +} + +var fileDescriptor_4f52e83a3ef81427 = []byte{ + // 441 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xb5, 0x38, 0x31, 0xc9, 0x94, 0x00, 0x5a, 0x41, 0x31, 0x2d, 0x88, 0x28, 0x08, 0x29, + 0xe2, 0x90, 0x48, 0x45, 0x1c, 0x91, 0x20, 0xea, 0xa5, 0x52, 0x81, 0x6a, 0xe1, 0xc4, 0xc5, 0x72, + 0xe2, 0x89, 0x6b, 0xd5, 0xbb, 0x63, 0xad, 0xd7, 0x0a, 0x7d, 0x07, 0x9e, 0x81, 0xf7, 0xe0, 0x15, + 0x78, 0x22, 0x8e, 0xc8, 0xde, 0xe0, 0x35, 0x69, 0x2a, 0x05, 0x7a, 0xf3, 0x3f, 0x9e, 0x9d, 0xf9, + 
0xfe, 0x19, 0x0d, 0xbc, 0x34, 0xa8, 0x0a, 0xd2, 0xcb, 0x8c, 0x56, 0xd3, 0x05, 0x69, 0x9c, 0x2e, + 0x75, 0x24, 0x71, 0x45, 0xfa, 0x62, 0x9a, 0x51, 0x12, 0x4a, 0x94, 0xa4, 0x2f, 0x27, 0xb9, 0x26, + 0x43, 0x1c, 0x5c, 0xee, 0xc1, 0xd1, 0xf5, 0xef, 0xec, 0x9f, 0x30, 0xc6, 0x62, 0xa1, 0xd3, 0xdc, + 0xa4, 0xa4, 0xec, 0xfb, 0xd1, 0x5b, 0x18, 0xbc, 0xaf, 0xeb, 0x9d, 0x52, 0xf2, 0xc9, 0x60, 0xce, + 0x1f, 0xc1, 0xed, 0xc2, 0x60, 0x1e, 0xa6, 0x71, 0xc0, 0x86, 0x6c, 0xec, 0x09, 0xbf, 0x92, 0x27, + 0x31, 0xdf, 0x07, 0xff, 0x3c, 0x52, 0x71, 0x86, 0xc1, 0xad, 0x21, 0x1b, 0xf7, 0xc5, 0x5a, 0x8d, + 0xbe, 0x31, 0x78, 0xdc, 0x94, 0xf8, 0x5c, 0xf7, 0x79, 0x97, 0x65, 0xb4, 0x88, 0xaa, 0x2e, 0xd7, + 0x97, 0x7b, 0x06, 0x7b, 0x17, 0xa8, 0x15, 0x66, 0xa1, 0x8a, 0xe4, 0x9f, 0x9a, 0x60, 0x43, 0x1f, + 0x22, 0x89, 0xfc, 0x35, 0xf8, 0x96, 0x3a, 0xf0, 0x86, 0x6c, 0xbc, 0x77, 0xf4, 0x74, 0xe2, 0xec, + 0x4d, 0x6c, 0x9f, 0x63, 0x67, 0x47, 0xac, 0x93, 0x47, 0x29, 0x1c, 0x6e, 0xd0, 0x1c, 0x63, 0xe4, + 0x78, 0x9e, 0xc3, 0xc0, 0x29, 0x47, 0x75, 0xc7, 0x05, 0x4f, 0x62, 0xfe, 0x02, 0xee, 0xae, 0x35, + 0xe9, 0x36, 0xde, 0xa0, 0x89, 0x56, 0x84, 0xa3, 0xef, 0x0c, 0x1e, 0x6e, 0xf4, 0xfa, 0x58, 0x9a, + 0xbc, 0x34, 0x37, 0x70, 0xfd, 0x00, 0xba, 0xa9, 0x8a, 0xf1, 0x6b, 0x6d, 0xba, 0x2b, 0xac, 0x68, + 0xcd, 0xa2, 0xf3, 0x2f, 0xb3, 0xf8, 0xc9, 0x60, 0xbf, 0x01, 0x14, 0xd1, 0x6a, 0x97, 0xbd, 0x3c, + 0x81, 0x3e, 0xe5, 0xa8, 0xeb, 0xac, 0x35, 0x9f, 0x0b, 0xf0, 0x43, 0xe8, 0xab, 0x52, 0x86, 0xf3, + 0x4b, 0x83, 0x45, 0x8d, 0xe8, 0x89, 0x9e, 0x2a, 0xe5, 0xac, 0xd2, 0xfc, 0x3e, 0x78, 0xb9, 0xb1, + 0x88, 0x1d, 0x51, 0x7d, 0x5e, 0x9d, 0x76, 0x77, 0xa7, 0x69, 0xfb, 0xdb, 0xa6, 0xfd, 0x83, 0x41, + 0xd0, 0x36, 0xf3, 0xd7, 0x5a, 0xff, 0xd3, 0xce, 0x15, 0x3e, 0x6f, 0x27, 0xbe, 0xce, 0x16, 0x3e, + 0x7e, 0x00, 0xbd, 0x18, 0x97, 0xa8, 0x35, 0x5a, 0x9b, 0x3d, 0xd1, 0xe8, 0x19, 0x41, 0x40, 0x3a, + 0x69, 0x2f, 0xad, 0x39, 0xcd, 0xd9, 0xbd, 0x53, 0x4a, 0xac, 0xaf, 0xb3, 0xea, 0x22, 0x8b, 0x33, + 0xf6, 0xe5, 0x4d, 0x92, 0x9a, 0xf3, 
0x72, 0x3e, 0x59, 0x90, 0x9c, 0xb6, 0x6e, 0x7a, 0xfb, 0x67, + 0x42, 0x1b, 0xc7, 0xfe, 0x8b, 0xb1, 0xb9, 0x5f, 0x5f, 0xf7, 0xab, 0xdf, 0x01, 0x00, 0x00, 0xff, + 0xff, 0xfc, 0x6c, 0x8e, 0x46, 0x4b, 0x04, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/log_memory.proto b/executor/proto/tensorflow/core/framework/log_memory.proto new file mode 100644 index 0000000000..7f37eadc3b --- /dev/null +++ b/executor/proto/tensorflow/core/framework/log_memory.proto @@ -0,0 +1,93 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "LogMemoryProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/tensor_description.proto"; + +message MemoryLogStep { + // Process-unique step id. + int64 step_id = 1; + + // Handle describing the feeds and fetches of the step. + string handle = 2; +}; + +message MemoryLogTensorAllocation { + // Process-unique step id. + int64 step_id = 1; + + // Name of the kernel making the allocation as set in GraphDef, + // e.g., "affine2/weights/Assign". + string kernel_name = 2; + + // Allocated tensor details. + TensorDescription tensor = 3; +}; + +message MemoryLogTensorDeallocation { + // Id of the tensor buffer being deallocated, used to match to a + // corresponding allocation. + int64 allocation_id = 1; + + // Name of the allocator used. + string allocator_name = 2; +}; + +message MemoryLogTensorOutput { + // Process-unique step id. + int64 step_id = 1; + + // Name of the kernel producing an output as set in GraphDef, e.g., + // "affine2/weights/Assign". + string kernel_name = 2; + + // Index of the output being set. + int32 index = 3; + + // Output tensor details. + TensorDescription tensor = 4; +} + +message MemoryLogRawAllocation { + // Process-unique step id. 
+ int64 step_id = 1; + + // Name of the operation making the allocation. + string operation = 2; + + // Number of bytes in the allocation. + int64 num_bytes = 3; + + // Address of the allocation. + uint64 ptr = 4; + + // Id of the tensor buffer being allocated, used to match to a + // corresponding deallocation. + int64 allocation_id = 5; + + // Name of the allocator used. + string allocator_name = 6; +}; + +message MemoryLogRawDeallocation { + // Process-unique step id. + int64 step_id = 1; + + // Name of the operation making the deallocation. + string operation = 2; + + // Id of the tensor buffer being deallocated, used to match to a + // corresponding allocation. + int64 allocation_id = 3; + + // Name of the allocator used. + string allocator_name = 4; + + // True if the deallocation is queued and will be performed later, + // e.g. for GPU lazy freeing of buffers. + bool deferred = 5; +}; diff --git a/executor/proto/tensorflow/core/framework/node_def.pb.go b/executor/proto/tensorflow/core/framework/node_def.pb.go new file mode 100644 index 0000000000..04803dad8a --- /dev/null +++ b/executor/proto/tensorflow/core/framework/node_def.pb.go @@ -0,0 +1,242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/node_def.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type NodeDef struct { + // The name given to this operator. Used for naming inputs, + // logging, visualization, etc. 
Unique within a single GraphDef. + // Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_>./]*". + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The operation name. There may be custom parameters in attrs. + // Op names starting with an underscore are reserved for internal use. + Op string `protobuf:"bytes,2,opt,name=op,proto3" json:"op,omitempty"` + // Each input is "node:src_output" with "node" being a string name and + // "src_output" indicating which output tensor to use from "node". If + // "src_output" is 0 the ":0" suffix can be omitted. Regular inputs + // may optionally be followed by control inputs that have the format + // "^node". + Input []string `protobuf:"bytes,3,rep,name=input,proto3" json:"input,omitempty"` + // A (possibly partial) specification for the device on which this + // node should be placed. + // The expected syntax for this string is as follows: + // + // DEVICE_SPEC ::= PARTIAL_SPEC + // + // PARTIAL_SPEC ::= ("/" CONSTRAINT) * + // CONSTRAINT ::= ("job:" JOB_NAME) + // | ("replica:" [1-9][0-9]*) + // | ("task:" [1-9][0-9]*) + // | ("device:" [A-Za-z]* ":" ([1-9][0-9]* | "*") ) + // + // Valid values for this string include: + // * "/job:worker/replica:0/task:1/device:GPU:3" (full specification) + // * "/job:worker/device:GPU:3" (partial specification) + // * "" (no specification) + // + // If the constraints do not resolve to a single device (or if this + // field is empty or not present), the runtime will attempt to + // choose a device automatically. + Device string `protobuf:"bytes,4,opt,name=device,proto3" json:"device,omitempty"` + // Operation-specific graph-construction-time configuration. + // Note that this should include all attrs defined in the + // corresponding OpDef, including those with a value matching + // the default -- this allows the default to change and makes + // NodeDefs easier to interpret on their own. 
However, if + // an attr with a default is not specified in this list, the + // default will be used. + // The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and + // one of the names from the corresponding OpDef's attr field). + // The values must have a type matching the corresponding OpDef + // attr's type field. + // TODO(josh11b): Add some examples here showing best practices. + Attr map[string]*AttrValue `protobuf:"bytes,5,rep,name=attr,proto3" json:"attr,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // This stores debug information associated with the node. + ExperimentalDebugInfo *NodeDef_ExperimentalDebugInfo `protobuf:"bytes,6,opt,name=experimental_debug_info,json=experimentalDebugInfo,proto3" json:"experimental_debug_info,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeDef) Reset() { *m = NodeDef{} } +func (m *NodeDef) String() string { return proto.CompactTextString(m) } +func (*NodeDef) ProtoMessage() {} +func (*NodeDef) Descriptor() ([]byte, []int) { + return fileDescriptor_b34b3b836a96140b, []int{0} +} + +func (m *NodeDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeDef.Unmarshal(m, b) +} +func (m *NodeDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeDef.Marshal(b, m, deterministic) +} +func (m *NodeDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeDef.Merge(m, src) +} +func (m *NodeDef) XXX_Size() int { + return xxx_messageInfo_NodeDef.Size(m) +} +func (m *NodeDef) XXX_DiscardUnknown() { + xxx_messageInfo_NodeDef.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeDef proto.InternalMessageInfo + +func (m *NodeDef) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NodeDef) GetOp() string { + if m != nil { + return m.Op + } + return "" +} + +func (m *NodeDef) GetInput() []string { + if m != nil { 
+ return m.Input + } + return nil +} + +func (m *NodeDef) GetDevice() string { + if m != nil { + return m.Device + } + return "" +} + +func (m *NodeDef) GetAttr() map[string]*AttrValue { + if m != nil { + return m.Attr + } + return nil +} + +func (m *NodeDef) GetExperimentalDebugInfo() *NodeDef_ExperimentalDebugInfo { + if m != nil { + return m.ExperimentalDebugInfo + } + return nil +} + +type NodeDef_ExperimentalDebugInfo struct { + // Opaque string inserted into error messages created by the runtime. + // + // This is intended to store the list of names of the nodes from the + // original graph that this node was derived. For example if this node, say + // C, was result of a fusion of 2 nodes A and B, then 'original_node' would + // be {A, B}. This information can be used to map errors originating at the + // current node to some top level source code. + OriginalNodeNames []string `protobuf:"bytes,1,rep,name=original_node_names,json=originalNodeNames,proto3" json:"original_node_names,omitempty"` + // This is intended to store the list of names of the functions from the + // original graph that this node was derived. For example if this node, say + // C, was result of a fusion of node A in function FA and node B in function + // FB, then `original_funcs` would be {FA, FB}. If the node is in the top + // level graph, the `original_func` is empty. This information, with the + // `original_node_names` can be used to map errors originating at the + // current ndoe to some top level source code. 
+ OriginalFuncNames []string `protobuf:"bytes,2,rep,name=original_func_names,json=originalFuncNames,proto3" json:"original_func_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeDef_ExperimentalDebugInfo) Reset() { *m = NodeDef_ExperimentalDebugInfo{} } +func (m *NodeDef_ExperimentalDebugInfo) String() string { return proto.CompactTextString(m) } +func (*NodeDef_ExperimentalDebugInfo) ProtoMessage() {} +func (*NodeDef_ExperimentalDebugInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_b34b3b836a96140b, []int{0, 1} +} + +func (m *NodeDef_ExperimentalDebugInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeDef_ExperimentalDebugInfo.Unmarshal(m, b) +} +func (m *NodeDef_ExperimentalDebugInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeDef_ExperimentalDebugInfo.Marshal(b, m, deterministic) +} +func (m *NodeDef_ExperimentalDebugInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeDef_ExperimentalDebugInfo.Merge(m, src) +} +func (m *NodeDef_ExperimentalDebugInfo) XXX_Size() int { + return xxx_messageInfo_NodeDef_ExperimentalDebugInfo.Size(m) +} +func (m *NodeDef_ExperimentalDebugInfo) XXX_DiscardUnknown() { + xxx_messageInfo_NodeDef_ExperimentalDebugInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeDef_ExperimentalDebugInfo proto.InternalMessageInfo + +func (m *NodeDef_ExperimentalDebugInfo) GetOriginalNodeNames() []string { + if m != nil { + return m.OriginalNodeNames + } + return nil +} + +func (m *NodeDef_ExperimentalDebugInfo) GetOriginalFuncNames() []string { + if m != nil { + return m.OriginalFuncNames + } + return nil +} + +func init() { + proto.RegisterType((*NodeDef)(nil), "tensorflow.NodeDef") + proto.RegisterMapType((map[string]*AttrValue)(nil), "tensorflow.NodeDef.AttrEntry") + proto.RegisterType((*NodeDef_ExperimentalDebugInfo)(nil), "tensorflow.NodeDef.ExperimentalDebugInfo") +} + 
+func init() { + proto.RegisterFile("tensorflow/core/framework/node_def.proto", fileDescriptor_b34b3b836a96140b) +} + +var fileDescriptor_b34b3b836a96140b = []byte{ + // 374 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0xeb, 0xd3, 0x30, + 0x18, 0xc7, 0x49, 0xbb, 0x4d, 0x9a, 0x81, 0x68, 0x74, 0x1a, 0x06, 0x42, 0xf1, 0x54, 0x15, 0x5a, + 0x9c, 0x17, 0x11, 0x3c, 0x38, 0x36, 0xc1, 0xcb, 0x18, 0x3d, 0x78, 0xf0, 0x52, 0xba, 0xf6, 0x69, + 0x0d, 0x6b, 0xf3, 0x94, 0x2c, 0xdd, 0xdc, 0x5b, 0xf3, 0x95, 0x79, 0x94, 0x64, 0x75, 0xeb, 0xa4, + 0xbf, 0xdb, 0x93, 0x3c, 0x9f, 0x6f, 0xbe, 0x79, 0xfe, 0xd0, 0x40, 0x83, 0x3c, 0xa0, 0x2a, 0x2a, + 0x3c, 0x45, 0x19, 0x2a, 0x88, 0x0a, 0x95, 0xd6, 0x70, 0x42, 0xb5, 0x8f, 0x24, 0xe6, 0x90, 0xe4, + 0x50, 0x84, 0x8d, 0x42, 0x8d, 0x8c, 0xde, 0xc8, 0xf9, 0xdb, 0x87, 0x55, 0xa9, 0xd6, 0x2a, 0x39, + 0xa6, 0x55, 0x0b, 0x17, 0xdd, 0xeb, 0xdf, 0x2e, 0x7d, 0xb4, 0xc1, 0x1c, 0x56, 0x50, 0x30, 0x46, + 0x47, 0x32, 0xad, 0x81, 0x13, 0x9f, 0x04, 0x5e, 0x6c, 0x63, 0xf6, 0x98, 0x3a, 0xd8, 0x70, 0xc7, + 0xde, 0x38, 0xd8, 0xb0, 0xe7, 0x74, 0x2c, 0x64, 0xd3, 0x6a, 0xee, 0xfa, 0x6e, 0xe0, 0xc5, 0x97, + 0x03, 0x7b, 0x41, 0x27, 0x39, 0x1c, 0x45, 0x06, 0x7c, 0x64, 0xc9, 0xee, 0xc4, 0xde, 0xd3, 0x91, + 0x71, 0xe4, 0x63, 0xdf, 0x0d, 0xa6, 0x8b, 0x57, 0xe1, 0xed, 0x63, 0x61, 0x67, 0x1a, 0x7e, 0xd1, + 0x5a, 0xad, 0xa5, 0x56, 0xe7, 0xd8, 0xa2, 0x2c, 0xa5, 0x2f, 0xe1, 0x57, 0x03, 0x4a, 0xd4, 0x20, + 0x75, 0x5a, 0x25, 0x39, 0xec, 0xda, 0x32, 0x11, 0xb2, 0x40, 0x3e, 0xf1, 0x49, 0x30, 0x5d, 0xbc, + 0x19, 0x7a, 0x65, 0xdd, 0x93, 0xac, 0x8c, 0xe2, 0x9b, 0x2c, 0x30, 0x9e, 0xc1, 0xd0, 0xf5, 0x7c, + 0x43, 0xbd, 0xab, 0x2b, 0x7b, 0x42, 0xdd, 0x3d, 0x9c, 0xbb, 0x9a, 0x4d, 0xc8, 0xde, 0xd1, 0xb1, + 0xed, 0x90, 0xad, 0x7a, 0xba, 0x98, 0xf5, 0xfd, 0x8c, 0xee, 0xbb, 0x49, 0xc6, 0x17, 0xe6, 0x93, + 0xf3, 0x91, 0xcc, 0x4f, 0x74, 0x36, 0xe8, 0xcf, 0x42, 0xfa, 0x0c, 0x95, 0x28, 0x85, 0x4c, 0xab, + 0xc4, 0xce, 0xcb, 
0xb4, 0xf4, 0xc0, 0x89, 0x6d, 0xdd, 0xd3, 0x7f, 0x29, 0x53, 0xc3, 0xc6, 0x24, + 0xee, 0xf8, 0xa2, 0x95, 0x59, 0xc7, 0x3b, 0xf7, 0xfc, 0xd7, 0x56, 0x66, 0x96, 0x5f, 0x0a, 0xca, + 0x51, 0x95, 0xfd, 0xff, 0x5d, 0x27, 0xbd, 0xf4, 0xcc, 0xb3, 0x5b, 0x33, 0xe3, 0x2d, 0xf9, 0xf1, + 0xb9, 0x14, 0xfa, 0x67, 0xbb, 0x0b, 0x33, 0xac, 0xa3, 0xde, 0x72, 0x0c, 0x87, 0x25, 0xfe, 0xb7, + 0x35, 0x7f, 0x08, 0xd9, 0x4d, 0xec, 0xba, 0x7c, 0xf8, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb5, 0xf2, + 0xb4, 0x7b, 0x92, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/node_def.proto b/executor/proto/tensorflow/core/framework/node_def.proto new file mode 100644 index 0000000000..3c89f78991 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/node_def.proto @@ -0,0 +1,86 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "NodeProto"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/attr_value.proto"; + +message NodeDef { + // The name given to this operator. Used for naming inputs, + // logging, visualization, etc. Unique within a single GraphDef. + // Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_>./]*". + string name = 1; + + // The operation name. There may be custom parameters in attrs. + // Op names starting with an underscore are reserved for internal use. + string op = 2; + + // Each input is "node:src_output" with "node" being a string name and + // "src_output" indicating which output tensor to use from "node". If + // "src_output" is 0 the ":0" suffix can be omitted. Regular inputs + // may optionally be followed by control inputs that have the format + // "^node". + repeated string input = 3; + + // A (possibly partial) specification for the device on which this + // node should be placed. 
+ // The expected syntax for this string is as follows: + // + // DEVICE_SPEC ::= PARTIAL_SPEC + // + // PARTIAL_SPEC ::= ("/" CONSTRAINT) * + // CONSTRAINT ::= ("job:" JOB_NAME) + // | ("replica:" [1-9][0-9]*) + // | ("task:" [1-9][0-9]*) + // | ("device:" [A-Za-z]* ":" ([1-9][0-9]* | "*") ) + // + // Valid values for this string include: + // * "/job:worker/replica:0/task:1/device:GPU:3" (full specification) + // * "/job:worker/device:GPU:3" (partial specification) + // * "" (no specification) + // + // If the constraints do not resolve to a single device (or if this + // field is empty or not present), the runtime will attempt to + // choose a device automatically. + string device = 4; + + // Operation-specific graph-construction-time configuration. + // Note that this should include all attrs defined in the + // corresponding OpDef, including those with a value matching + // the default -- this allows the default to change and makes + // NodeDefs easier to interpret on their own. However, if + // an attr with a default is not specified in this list, the + // default will be used. + // The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and + // one of the names from the corresponding OpDef's attr field). + // The values must have a type matching the corresponding OpDef + // attr's type field. + // TODO(josh11b): Add some examples here showing best practices. + map attr = 5; + + message ExperimentalDebugInfo { + // Opaque string inserted into error messages created by the runtime. + // + // This is intended to store the list of names of the nodes from the + // original graph that this node was derived. For example if this node, say + // C, was result of a fusion of 2 nodes A and B, then 'original_node' would + // be {A, B}. This information can be used to map errors originating at the + // current node to some top level source code. 
+ repeated string original_node_names = 1; + + // This is intended to store the list of names of the functions from the + // original graph that this node was derived. For example if this node, say + // C, was result of a fusion of node A in function FA and node B in function + // FB, then `original_funcs` would be {FA, FB}. If the node is in the top + // level graph, the `original_func` is empty. This information, with the + // `original_node_names` can be used to map errors originating at the + // current ndoe to some top level source code. + repeated string original_func_names = 2; + }; + + // This stores debug information associated with the node. + ExperimentalDebugInfo experimental_debug_info = 6; +}; diff --git a/executor/proto/tensorflow/core/framework/op_def.pb.go b/executor/proto/tensorflow/core/framework/op_def.pb.go new file mode 100644 index 0000000000..63014c4c7c --- /dev/null +++ b/executor/proto/tensorflow/core/framework/op_def.pb.go @@ -0,0 +1,543 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/op_def.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Defines an operation. A NodeDef in a GraphDef specifies an Op by +// using the "op" field which should match the name of a OpDef. +// LINT.IfChange +type OpDef struct { + // Op names starting with an underscore are reserved for internal use. + // Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9>_]*". 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Description of the input(s). + InputArg []*OpDef_ArgDef `protobuf:"bytes,2,rep,name=input_arg,json=inputArg,proto3" json:"input_arg,omitempty"` + // Description of the output(s). + OutputArg []*OpDef_ArgDef `protobuf:"bytes,3,rep,name=output_arg,json=outputArg,proto3" json:"output_arg,omitempty"` + // Named control outputs for this operation. Useful only for composite + // operations (i.e. functions) which want to name different control outputs. + ControlOutput []string `protobuf:"bytes,20,rep,name=control_output,json=controlOutput,proto3" json:"control_output,omitempty"` + Attr []*OpDef_AttrDef `protobuf:"bytes,4,rep,name=attr,proto3" json:"attr,omitempty"` + // Optional deprecation based on GraphDef versions. + Deprecation *OpDeprecation `protobuf:"bytes,8,opt,name=deprecation,proto3" json:"deprecation,omitempty"` + // One-line human-readable description of what the Op does. + Summary string `protobuf:"bytes,5,opt,name=summary,proto3" json:"summary,omitempty"` + // Additional, longer human-readable description of what the Op does. + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + // True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs) + IsCommutative bool `protobuf:"varint,18,opt,name=is_commutative,json=isCommutative,proto3" json:"is_commutative,omitempty"` + // If is_aggregate is true, then this operation accepts N >= 2 + // inputs and produces 1 output all of the same type. Should be + // associative and commutative, and produce output with the same + // shape as the input. The optimizer may replace an aggregate op + // taking input from multiple devices with a tree of aggregate ops + // that aggregate locally within each device (and possibly within + // groups of nearby devices) before communicating. + // TODO(josh11b): Implement that optimization. 
+ IsAggregate bool `protobuf:"varint,16,opt,name=is_aggregate,json=isAggregate,proto3" json:"is_aggregate,omitempty"` + // Ops are marked as stateful if their behavior depends on some state beyond + // their input tensors (e.g. variable reading op) or if they have + // a side-effect (e.g. printing or asserting ops). Equivalently, stateless ops + // must always produce the same output for the same input and have + // no side-effects. + // + // By default Ops may be moved between devices. Stateful ops should + // either not be moved, or should only be moved if that state can also + // be moved (e.g. via some sort of save / restore). + // Stateful ops are guaranteed to never be optimized away by Common + // Subexpression Elimination (CSE). + IsStateful bool `protobuf:"varint,17,opt,name=is_stateful,json=isStateful,proto3" json:"is_stateful,omitempty"` + // By default, all inputs to an Op must be initialized Tensors. Ops + // that may initialize tensors for the first time should set this + // field to true, to allow the Op to take an uninitialized Tensor as + // input. 
+ AllowsUninitializedInput bool `protobuf:"varint,19,opt,name=allows_uninitialized_input,json=allowsUninitializedInput,proto3" json:"allows_uninitialized_input,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OpDef) Reset() { *m = OpDef{} } +func (m *OpDef) String() string { return proto.CompactTextString(m) } +func (*OpDef) ProtoMessage() {} +func (*OpDef) Descriptor() ([]byte, []int) { + return fileDescriptor_0a0e27face061c12, []int{0} +} + +func (m *OpDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OpDef.Unmarshal(m, b) +} +func (m *OpDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OpDef.Marshal(b, m, deterministic) +} +func (m *OpDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpDef.Merge(m, src) +} +func (m *OpDef) XXX_Size() int { + return xxx_messageInfo_OpDef.Size(m) +} +func (m *OpDef) XXX_DiscardUnknown() { + xxx_messageInfo_OpDef.DiscardUnknown(m) +} + +var xxx_messageInfo_OpDef proto.InternalMessageInfo + +func (m *OpDef) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *OpDef) GetInputArg() []*OpDef_ArgDef { + if m != nil { + return m.InputArg + } + return nil +} + +func (m *OpDef) GetOutputArg() []*OpDef_ArgDef { + if m != nil { + return m.OutputArg + } + return nil +} + +func (m *OpDef) GetControlOutput() []string { + if m != nil { + return m.ControlOutput + } + return nil +} + +func (m *OpDef) GetAttr() []*OpDef_AttrDef { + if m != nil { + return m.Attr + } + return nil +} + +func (m *OpDef) GetDeprecation() *OpDeprecation { + if m != nil { + return m.Deprecation + } + return nil +} + +func (m *OpDef) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *OpDef) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *OpDef) GetIsCommutative() bool { + if m != nil { + return m.IsCommutative + } + 
return false +} + +func (m *OpDef) GetIsAggregate() bool { + if m != nil { + return m.IsAggregate + } + return false +} + +func (m *OpDef) GetIsStateful() bool { + if m != nil { + return m.IsStateful + } + return false +} + +func (m *OpDef) GetAllowsUninitializedInput() bool { + if m != nil { + return m.AllowsUninitializedInput + } + return false +} + +// For describing inputs and outputs. +type OpDef_ArgDef struct { + // Name for the input/output. Should match the regexp "[a-z][a-z0-9_]*". + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Human readable description. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Describes the type of one or more tensors that are accepted/produced + // by this input/output arg. The only legal combinations are: + // * For a single tensor: either the "type" field is set or the + // "type_attr" field is set to the name of an attr with type "type". + // * For a sequence of tensors with the same type: the "number_attr" + // field will be set to the name of an attr with type "int", and + // either the "type" or "type_attr" field will be set as for + // single tensors. + // * For a sequence of tensors, the "type_list_attr" field will be set + // to the name of an attr with type "list(type)". + Type DataType `protobuf:"varint,3,opt,name=type,proto3,enum=tensorflow.DataType" json:"type,omitempty"` + TypeAttr string `protobuf:"bytes,4,opt,name=type_attr,json=typeAttr,proto3" json:"type_attr,omitempty"` + NumberAttr string `protobuf:"bytes,5,opt,name=number_attr,json=numberAttr,proto3" json:"number_attr,omitempty"` + // If specified, attr must have type "list(type)", and none of + // type, type_attr, and number_attr may be specified. + TypeListAttr string `protobuf:"bytes,6,opt,name=type_list_attr,json=typeListAttr,proto3" json:"type_list_attr,omitempty"` + // For inputs: if true, the inputs are required to be refs. 
+ // By default, inputs can be either refs or non-refs. + // For outputs: if true, outputs are refs, otherwise they are not. + IsRef bool `protobuf:"varint,16,opt,name=is_ref,json=isRef,proto3" json:"is_ref,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OpDef_ArgDef) Reset() { *m = OpDef_ArgDef{} } +func (m *OpDef_ArgDef) String() string { return proto.CompactTextString(m) } +func (*OpDef_ArgDef) ProtoMessage() {} +func (*OpDef_ArgDef) Descriptor() ([]byte, []int) { + return fileDescriptor_0a0e27face061c12, []int{0, 0} +} + +func (m *OpDef_ArgDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OpDef_ArgDef.Unmarshal(m, b) +} +func (m *OpDef_ArgDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OpDef_ArgDef.Marshal(b, m, deterministic) +} +func (m *OpDef_ArgDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpDef_ArgDef.Merge(m, src) +} +func (m *OpDef_ArgDef) XXX_Size() int { + return xxx_messageInfo_OpDef_ArgDef.Size(m) +} +func (m *OpDef_ArgDef) XXX_DiscardUnknown() { + xxx_messageInfo_OpDef_ArgDef.DiscardUnknown(m) +} + +var xxx_messageInfo_OpDef_ArgDef proto.InternalMessageInfo + +func (m *OpDef_ArgDef) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *OpDef_ArgDef) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *OpDef_ArgDef) GetType() DataType { + if m != nil { + return m.Type + } + return DataType_DT_INVALID +} + +func (m *OpDef_ArgDef) GetTypeAttr() string { + if m != nil { + return m.TypeAttr + } + return "" +} + +func (m *OpDef_ArgDef) GetNumberAttr() string { + if m != nil { + return m.NumberAttr + } + return "" +} + +func (m *OpDef_ArgDef) GetTypeListAttr() string { + if m != nil { + return m.TypeListAttr + } + return "" +} + +func (m *OpDef_ArgDef) GetIsRef() bool { + if m != nil { + return m.IsRef + } + return false +} + +// 
Description of the graph-construction-time configuration of this +// Op. That is to say, this describes the attr fields that will +// be specified in the NodeDef. +type OpDef_AttrDef struct { + // A descriptive name for the argument. May be used, e.g. by the + // Python client, as a keyword argument name, and so should match + // the regexp "[a-z][a-z0-9_]+". + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // One of the type names from attr_value.proto ("string", "list(string)", + // "int", etc.). + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // A reasonable default for this attribute if the user does not supply + // a value. If not specified, the user must supply a value. + DefaultValue *AttrValue `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Human-readable description. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // For type == "int", this is a minimum value. For "list(___)" + // types, this is the minimum length. + HasMinimum bool `protobuf:"varint,5,opt,name=has_minimum,json=hasMinimum,proto3" json:"has_minimum,omitempty"` + Minimum int64 `protobuf:"varint,6,opt,name=minimum,proto3" json:"minimum,omitempty"` + // The set of allowed values. Has type that is the "list" version + // of the "type" field above (uses the "list" field of AttrValue). + // If type == "type" or "list(type)" above, then the "type" field + // of "allowed_values.list" has the set of allowed DataTypes. + // If type == "string" or "list(string)", then the "s" field of + // "allowed_values.list" has the set of allowed strings. 
+ AllowedValues *AttrValue `protobuf:"bytes,7,opt,name=allowed_values,json=allowedValues,proto3" json:"allowed_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OpDef_AttrDef) Reset() { *m = OpDef_AttrDef{} } +func (m *OpDef_AttrDef) String() string { return proto.CompactTextString(m) } +func (*OpDef_AttrDef) ProtoMessage() {} +func (*OpDef_AttrDef) Descriptor() ([]byte, []int) { + return fileDescriptor_0a0e27face061c12, []int{0, 1} +} + +func (m *OpDef_AttrDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OpDef_AttrDef.Unmarshal(m, b) +} +func (m *OpDef_AttrDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OpDef_AttrDef.Marshal(b, m, deterministic) +} +func (m *OpDef_AttrDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpDef_AttrDef.Merge(m, src) +} +func (m *OpDef_AttrDef) XXX_Size() int { + return xxx_messageInfo_OpDef_AttrDef.Size(m) +} +func (m *OpDef_AttrDef) XXX_DiscardUnknown() { + xxx_messageInfo_OpDef_AttrDef.DiscardUnknown(m) +} + +var xxx_messageInfo_OpDef_AttrDef proto.InternalMessageInfo + +func (m *OpDef_AttrDef) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *OpDef_AttrDef) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *OpDef_AttrDef) GetDefaultValue() *AttrValue { + if m != nil { + return m.DefaultValue + } + return nil +} + +func (m *OpDef_AttrDef) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *OpDef_AttrDef) GetHasMinimum() bool { + if m != nil { + return m.HasMinimum + } + return false +} + +func (m *OpDef_AttrDef) GetMinimum() int64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *OpDef_AttrDef) GetAllowedValues() *AttrValue { + if m != nil { + return m.AllowedValues + } + return nil +} + +// Information about version-dependent deprecation of an op +type 
OpDeprecation struct { + // First GraphDef version at which the op is disallowed. + Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Explanation of why it was deprecated and what to use instead. + Explanation string `protobuf:"bytes,2,opt,name=explanation,proto3" json:"explanation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OpDeprecation) Reset() { *m = OpDeprecation{} } +func (m *OpDeprecation) String() string { return proto.CompactTextString(m) } +func (*OpDeprecation) ProtoMessage() {} +func (*OpDeprecation) Descriptor() ([]byte, []int) { + return fileDescriptor_0a0e27face061c12, []int{1} +} + +func (m *OpDeprecation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OpDeprecation.Unmarshal(m, b) +} +func (m *OpDeprecation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OpDeprecation.Marshal(b, m, deterministic) +} +func (m *OpDeprecation) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpDeprecation.Merge(m, src) +} +func (m *OpDeprecation) XXX_Size() int { + return xxx_messageInfo_OpDeprecation.Size(m) +} +func (m *OpDeprecation) XXX_DiscardUnknown() { + xxx_messageInfo_OpDeprecation.DiscardUnknown(m) +} + +var xxx_messageInfo_OpDeprecation proto.InternalMessageInfo + +func (m *OpDeprecation) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *OpDeprecation) GetExplanation() string { + if m != nil { + return m.Explanation + } + return "" +} + +// A collection of OpDefs +type OpList struct { + Op []*OpDef `protobuf:"bytes,1,rep,name=op,proto3" json:"op,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OpList) Reset() { *m = OpList{} } +func (m *OpList) String() string { return proto.CompactTextString(m) } +func (*OpList) ProtoMessage() {} +func (*OpList) 
Descriptor() ([]byte, []int) { + return fileDescriptor_0a0e27face061c12, []int{2} +} + +func (m *OpList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OpList.Unmarshal(m, b) +} +func (m *OpList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OpList.Marshal(b, m, deterministic) +} +func (m *OpList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpList.Merge(m, src) +} +func (m *OpList) XXX_Size() int { + return xxx_messageInfo_OpList.Size(m) +} +func (m *OpList) XXX_DiscardUnknown() { + xxx_messageInfo_OpList.DiscardUnknown(m) +} + +var xxx_messageInfo_OpList proto.InternalMessageInfo + +func (m *OpList) GetOp() []*OpDef { + if m != nil { + return m.Op + } + return nil +} + +func init() { + proto.RegisterType((*OpDef)(nil), "tensorflow.OpDef") + proto.RegisterType((*OpDef_ArgDef)(nil), "tensorflow.OpDef.ArgDef") + proto.RegisterType((*OpDef_AttrDef)(nil), "tensorflow.OpDef.AttrDef") + proto.RegisterType((*OpDeprecation)(nil), "tensorflow.OpDeprecation") + proto.RegisterType((*OpList)(nil), "tensorflow.OpList") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/op_def.proto", fileDescriptor_0a0e27face061c12) +} + +var fileDescriptor_0a0e27face061c12 = []byte{ + // 668 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0x5d, 0x6f, 0xd3, 0x3c, + 0x14, 0xc7, 0x95, 0xbe, 0xf7, 0x64, 0xad, 0x9e, 0xf9, 0xd9, 0x24, 0x53, 0x2e, 0x96, 0x4d, 0x0c, + 0x55, 0x20, 0x5a, 0x69, 0x08, 0x21, 0xc1, 0xb8, 0xd8, 0xd8, 0x0d, 0x02, 0xb4, 0x29, 0xbc, 0x5c, + 0x70, 0x13, 0x79, 0xad, 0x93, 0x59, 0x4b, 0xe2, 0xc8, 0x76, 0x36, 0xc6, 0x27, 0xe0, 0x7b, 0xf2, + 0x15, 0xb8, 0xe0, 0x12, 0xf9, 0x38, 0xdd, 0xc2, 0xda, 0xc1, 0x55, 0xe3, 0xff, 0xf9, 0x9d, 0x53, + 0xff, 0xcf, 0xb1, 0x0d, 0x0f, 0x0d, 0xcf, 0xb5, 0x54, 0x71, 0x2a, 0x2f, 0xa7, 0x33, 0xa9, 0xf8, + 0x34, 0x56, 0x2c, 0xe3, 0x97, 0x52, 0x9d, 0x4f, 0x65, 0x11, 0xcd, 0x79, 0x3c, 0x29, 0x94, 0x34, + 0x92, 0xc0, 0x0d, 
0x37, 0x7a, 0x74, 0x77, 0x0e, 0x33, 0x46, 0x45, 0x17, 0x2c, 0x2d, 0xb9, 0xcb, + 0x1b, 0xed, 0xde, 0xcd, 0x9a, 0xab, 0x82, 0x6b, 0x87, 0xed, 0xfc, 0xec, 0x42, 0xfb, 0xb8, 0x38, + 0xe2, 0x31, 0x21, 0xd0, 0xca, 0x59, 0xc6, 0xa9, 0x17, 0x78, 0xe3, 0x7e, 0x88, 0xdf, 0xe4, 0x19, + 0xf4, 0x45, 0x5e, 0x94, 0x26, 0x62, 0x2a, 0xa1, 0x8d, 0xa0, 0x39, 0xf6, 0xf7, 0xe8, 0xe4, 0xa6, + 0xf0, 0x04, 0x33, 0x27, 0x07, 0x2a, 0x39, 0xe2, 0x71, 0xd8, 0x43, 0xf4, 0x40, 0x25, 0xe4, 0x39, + 0x80, 0x2c, 0xcd, 0x22, 0xaf, 0xf9, 0x8f, 0xbc, 0xbe, 0x63, 0x6d, 0xe2, 0x2e, 0x0c, 0x67, 0x32, + 0x37, 0x4a, 0xa6, 0x91, 0x13, 0xe9, 0x46, 0xd0, 0x1c, 0xf7, 0xc3, 0x41, 0xa5, 0x1e, 0xa3, 0x48, + 0x9e, 0x40, 0xcb, 0xfa, 0xa5, 0x2d, 0xac, 0x7c, 0x6f, 0x45, 0x65, 0x63, 0x94, 0x2d, 0x8d, 0x18, + 0x79, 0x09, 0xfe, 0x9c, 0x17, 0x8a, 0xcf, 0x98, 0x11, 0x32, 0xa7, 0xbd, 0xc0, 0x5b, 0x95, 0x75, + 0x0d, 0x84, 0x75, 0x9a, 0x50, 0xe8, 0xea, 0x32, 0xcb, 0x98, 0xba, 0xa2, 0x6d, 0xec, 0xcc, 0x62, + 0x49, 0x02, 0x5b, 0x56, 0xcf, 0x94, 0x28, 0xb0, 0x6c, 0x07, 0xa3, 0x75, 0xc9, 0xda, 0x11, 0x3a, + 0x9a, 0xc9, 0x2c, 0x2b, 0x0d, 0x33, 0xe2, 0x82, 0x53, 0x12, 0x78, 0xe3, 0x5e, 0x38, 0x10, 0xfa, + 0xf5, 0x8d, 0x48, 0xb6, 0x61, 0x4d, 0xe8, 0x88, 0x25, 0x89, 0xe2, 0x09, 0x33, 0x9c, 0xfe, 0x87, + 0x90, 0x2f, 0xf4, 0xc1, 0x42, 0x22, 0x5b, 0xe0, 0x0b, 0x1d, 0x69, 0xc3, 0x0c, 0x8f, 0xcb, 0x94, + 0xae, 0x23, 0x01, 0x42, 0x7f, 0xa8, 0x14, 0xb2, 0x0f, 0x23, 0x96, 0xa6, 0xf2, 0x52, 0x47, 0x65, + 0x2e, 0x72, 0x61, 0x04, 0x4b, 0xc5, 0x37, 0x3e, 0x8f, 0x70, 0x26, 0xf4, 0x7f, 0xe4, 0xa9, 0x23, + 0x3e, 0xd5, 0x81, 0x37, 0x36, 0x3e, 0xfa, 0xe1, 0x41, 0xc7, 0x4d, 0x63, 0xe5, 0x31, 0xb8, 0xe5, + 0xb4, 0xb1, 0xec, 0x74, 0x0c, 0x2d, 0x7b, 0xaa, 0x68, 0x33, 0xf0, 0xc6, 0xc3, 0xbd, 0x8d, 0x7a, + 0x6f, 0x8f, 0x98, 0x61, 0x1f, 0xaf, 0x0a, 0x1e, 0x22, 0x41, 0xee, 0x43, 0xdf, 0xfe, 0x46, 0xd5, + 0x00, 0x6d, 0xa5, 0x9e, 0x15, 0xec, 0xc8, 0xac, 0xcd, 0xbc, 0xcc, 0x4e, 0xb9, 0x72, 0x61, 0xd7, + 0x70, 0x70, 0x12, 0x02, 0x0f, 0x60, 0x88, 0xd9, 0xa9, 
0xd0, 0xc6, 0x31, 0xae, 0xed, 0x6b, 0x56, + 0x7d, 0x27, 0xb4, 0x41, 0x6a, 0x13, 0x3a, 0x42, 0x47, 0x8a, 0xc7, 0x55, 0x2b, 0xdb, 0x42, 0x87, + 0x3c, 0x1e, 0x7d, 0x6f, 0x40, 0xb7, 0x3a, 0x19, 0x2b, 0x6d, 0x92, 0xca, 0x84, 0xf3, 0xe7, 0xb6, + 0xfb, 0x02, 0x06, 0x73, 0x1e, 0xb3, 0x32, 0x35, 0xee, 0x76, 0xa1, 0x43, 0x7f, 0x6f, 0xb3, 0xee, + 0xd0, 0xd6, 0xfc, 0x6c, 0x83, 0xe1, 0x5a, 0xc5, 0xe2, 0xea, 0x76, 0xdb, 0x5a, 0xcb, 0x6d, 0xdb, + 0x02, 0xff, 0x8c, 0xe9, 0x28, 0x13, 0xb9, 0xc8, 0xca, 0x0c, 0xfd, 0xf6, 0x42, 0x38, 0x63, 0xfa, + 0xbd, 0x53, 0xec, 0xe9, 0x5b, 0x04, 0xad, 0xd1, 0x66, 0xb8, 0x58, 0x92, 0x7d, 0x18, 0xe2, 0x38, + 0xf9, 0xdc, 0x6d, 0x4c, 0xd3, 0xee, 0xdf, 0x76, 0x36, 0xa8, 0x60, 0x5c, 0xe9, 0x9d, 0xb7, 0x30, + 0xf8, 0xe3, 0xcc, 0xdb, 0x3f, 0xba, 0xe0, 0x4a, 0xdb, 0x7d, 0xda, 0x96, 0xb4, 0xc3, 0xc5, 0xd2, + 0xba, 0xe0, 0x5f, 0x8b, 0x94, 0xe5, 0xac, 0x3e, 0xfc, 0x9a, 0xb4, 0xf3, 0x18, 0x3a, 0xc7, 0x85, + 0x6d, 0x3e, 0xd9, 0x86, 0x86, 0x2c, 0xa8, 0x87, 0xd7, 0x72, 0x7d, 0xe9, 0x5a, 0x86, 0x0d, 0x59, + 0x1c, 0x9e, 0x03, 0x95, 0x2a, 0xa9, 0xc7, 0xae, 0x1f, 0xa6, 0x43, 0x1f, 0xb1, 0x13, 0xfb, 0x30, + 0xe9, 0x13, 0xef, 0xcb, 0xab, 0x44, 0x98, 0xb3, 0xf2, 0x74, 0x32, 0x93, 0xd9, 0xb4, 0xf6, 0x9a, + 0xad, 0xfe, 0x4c, 0xe4, 0xad, 0x67, 0xee, 0x97, 0xe7, 0x9d, 0x76, 0xf0, 0x91, 0x7b, 0xfa, 0x3b, + 0x00, 0x00, 0xff, 0xff, 0xfb, 0xb4, 0x9a, 0x82, 0x6d, 0x05, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/op_def.proto b/executor/proto/tensorflow/core/framework/op_def.proto new file mode 100644 index 0000000000..9b65e23e55 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/op_def.proto @@ -0,0 +1,170 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "OpDefProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import 
"tensorflow/core/framework/attr_value.proto"; +import "tensorflow/core/framework/types.proto"; + +// Defines an operation. A NodeDef in a GraphDef specifies an Op by +// using the "op" field which should match the name of a OpDef. +// LINT.IfChange +message OpDef { + // Op names starting with an underscore are reserved for internal use. + // Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9>_]*". + string name = 1; + + // For describing inputs and outputs. + message ArgDef { + // Name for the input/output. Should match the regexp "[a-z][a-z0-9_]*". + string name = 1; + + // Human readable description. + string description = 2; + + // Describes the type of one or more tensors that are accepted/produced + // by this input/output arg. The only legal combinations are: + // * For a single tensor: either the "type" field is set or the + // "type_attr" field is set to the name of an attr with type "type". + // * For a sequence of tensors with the same type: the "number_attr" + // field will be set to the name of an attr with type "int", and + // either the "type" or "type_attr" field will be set as for + // single tensors. + // * For a sequence of tensors, the "type_list_attr" field will be set + // to the name of an attr with type "list(type)". + DataType type = 3; + string type_attr = 4; // if specified, attr must have type "type" + string number_attr = 5; // if specified, attr must have type "int" + // If specified, attr must have type "list(type)", and none of + // type, type_attr, and number_attr may be specified. + string type_list_attr = 6; + + // For inputs: if true, the inputs are required to be refs. + // By default, inputs can be either refs or non-refs. + // For outputs: if true, outputs are refs, otherwise they are not. + bool is_ref = 16; + }; + + // Description of the input(s). + repeated ArgDef input_arg = 2; + + // Description of the output(s). + repeated ArgDef output_arg = 3; + + // Named control outputs for this operation. 
Useful only for composite + // operations (i.e. functions) which want to name different control outputs. + repeated string control_output = 20; + + // Description of the graph-construction-time configuration of this + // Op. That is to say, this describes the attr fields that will + // be specified in the NodeDef. + message AttrDef { + // A descriptive name for the argument. May be used, e.g. by the + // Python client, as a keyword argument name, and so should match + // the regexp "[a-z][a-z0-9_]+". + string name = 1; + + // One of the type names from attr_value.proto ("string", "list(string)", + // "int", etc.). + string type = 2; + + // A reasonable default for this attribute if the user does not supply + // a value. If not specified, the user must supply a value. + AttrValue default_value = 3; + + // Human-readable description. + string description = 4; + + // TODO(josh11b): bool is_optional? + + // --- Constraints --- + // These constraints are only in effect if specified. Default is no + // constraints. + + // For type == "int", this is a minimum value. For "list(___)" + // types, this is the minimum length. + bool has_minimum = 5; + int64 minimum = 6; + + // The set of allowed values. Has type that is the "list" version + // of the "type" field above (uses the "list" field of AttrValue). + // If type == "type" or "list(type)" above, then the "type" field + // of "allowed_values.list" has the set of allowed DataTypes. + // If type == "string" or "list(string)", then the "s" field of + // "allowed_values.list" has the set of allowed strings. + AttrValue allowed_values = 7; + } + repeated AttrDef attr = 4; + + // Optional deprecation based on GraphDef versions. + OpDeprecation deprecation = 8; + + // One-line human-readable description of what the Op does. + string summary = 5; + + // Additional, longer human-readable description of what the Op does. 
+ string description = 6; + + // ------------------------------------------------------------------------- + // Which optimizations this operation can participate in. + + // True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs) + bool is_commutative = 18; + + // If is_aggregate is true, then this operation accepts N >= 2 + // inputs and produces 1 output all of the same type. Should be + // associative and commutative, and produce output with the same + // shape as the input. The optimizer may replace an aggregate op + // taking input from multiple devices with a tree of aggregate ops + // that aggregate locally within each device (and possibly within + // groups of nearby devices) before communicating. + // TODO(josh11b): Implement that optimization. + bool is_aggregate = 16; // for things like add + + // Other optimizations go here, like + // can_alias_input, rewrite_when_output_unused, partitioning_strategy, etc. + + // ------------------------------------------------------------------------- + // Optimization constraints. + + // Ops are marked as stateful if their behavior depends on some state beyond + // their input tensors (e.g. variable reading op) or if they have + // a side-effect (e.g. printing or asserting ops). Equivalently, stateless ops + // must always produce the same output for the same input and have + // no side-effects. + // + // By default Ops may be moved between devices. Stateful ops should + // either not be moved, or should only be moved if that state can also + // be moved (e.g. via some sort of save / restore). + // Stateful ops are guaranteed to never be optimized away by Common + // Subexpression Elimination (CSE). + bool is_stateful = 17; // for things like variables, queue + + // ------------------------------------------------------------------------- + // Non-standard options. + + // By default, all inputs to an Op must be initialized Tensors. 
Ops + // that may initialize tensors for the first time should set this + // field to true, to allow the Op to take an uninitialized Tensor as + // input. + bool allows_uninitialized_input = 19; // for Assign, etc. +}; +// LINT.ThenChange( +// https://www.tensorflow.org/code/tensorflow/core/framework/op_def_util.cc) + +// Information about version-dependent deprecation of an op +message OpDeprecation { + // First GraphDef version at which the op is disallowed. + int32 version = 1; + + // Explanation of why it was deprecated and what to use instead. + string explanation = 2; +}; + +// A collection of OpDefs +message OpList { + repeated OpDef op = 1; +}; diff --git a/executor/proto/tensorflow/core/framework/reader_base.pb.go b/executor/proto/tensorflow/core/framework/reader_base.pb.go new file mode 100644 index 0000000000..3d3f988afe --- /dev/null +++ b/executor/proto/tensorflow/core/framework/reader_base.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/reader_base.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// For serializing and restoring the state of ReaderBase, see +// reader_base.h for details. 
+type ReaderBaseState struct { + WorkStarted int64 `protobuf:"varint,1,opt,name=work_started,json=workStarted,proto3" json:"work_started,omitempty"` + WorkFinished int64 `protobuf:"varint,2,opt,name=work_finished,json=workFinished,proto3" json:"work_finished,omitempty"` + NumRecordsProduced int64 `protobuf:"varint,3,opt,name=num_records_produced,json=numRecordsProduced,proto3" json:"num_records_produced,omitempty"` + CurrentWork []byte `protobuf:"bytes,4,opt,name=current_work,json=currentWork,proto3" json:"current_work,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReaderBaseState) Reset() { *m = ReaderBaseState{} } +func (m *ReaderBaseState) String() string { return proto.CompactTextString(m) } +func (*ReaderBaseState) ProtoMessage() {} +func (*ReaderBaseState) Descriptor() ([]byte, []int) { + return fileDescriptor_9d8282e7620a01b6, []int{0} +} + +func (m *ReaderBaseState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReaderBaseState.Unmarshal(m, b) +} +func (m *ReaderBaseState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReaderBaseState.Marshal(b, m, deterministic) +} +func (m *ReaderBaseState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReaderBaseState.Merge(m, src) +} +func (m *ReaderBaseState) XXX_Size() int { + return xxx_messageInfo_ReaderBaseState.Size(m) +} +func (m *ReaderBaseState) XXX_DiscardUnknown() { + xxx_messageInfo_ReaderBaseState.DiscardUnknown(m) +} + +var xxx_messageInfo_ReaderBaseState proto.InternalMessageInfo + +func (m *ReaderBaseState) GetWorkStarted() int64 { + if m != nil { + return m.WorkStarted + } + return 0 +} + +func (m *ReaderBaseState) GetWorkFinished() int64 { + if m != nil { + return m.WorkFinished + } + return 0 +} + +func (m *ReaderBaseState) GetNumRecordsProduced() int64 { + if m != nil { + return m.NumRecordsProduced + } + return 0 +} + +func (m *ReaderBaseState) 
GetCurrentWork() []byte { + if m != nil { + return m.CurrentWork + } + return nil +} + +func init() { + proto.RegisterType((*ReaderBaseState)(nil), "tensorflow.ReaderBaseState") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/reader_base.proto", fileDescriptor_9d8282e7620a01b6) +} + +var fileDescriptor_9d8282e7620a01b6 = []byte{ + // 248 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4b, 0xf4, 0x30, + 0x10, 0x86, 0xc9, 0xb7, 0x1f, 0x1e, 0xb2, 0x2b, 0x4a, 0xf0, 0x90, 0xe3, 0xaa, 0x97, 0x05, 0xa1, + 0x15, 0x3c, 0x7b, 0xe9, 0xc1, 0x73, 0xe9, 0x1e, 0x04, 0x2f, 0x21, 0x4d, 0xa6, 0xdd, 0xb2, 0x36, + 0x53, 0x26, 0x09, 0xfb, 0xaf, 0xfc, 0x7d, 0x1e, 0x25, 0xd9, 0x62, 0x45, 0xbc, 0x85, 0x77, 0x9e, + 0xc9, 0xf0, 0xbc, 0xfc, 0x21, 0x80, 0xf3, 0x48, 0xdd, 0x3b, 0x9e, 0x4a, 0x83, 0x04, 0x65, 0x47, + 0x7a, 0x84, 0x13, 0xd2, 0xb1, 0x24, 0xd0, 0x16, 0x48, 0xb5, 0xda, 0x43, 0x31, 0x11, 0x06, 0x14, + 0x7c, 0x81, 0xef, 0x3e, 0x18, 0xbf, 0x6a, 0x32, 0x51, 0x69, 0x0f, 0xfb, 0xa0, 0x03, 0x88, 0x5b, + 0xbe, 0x49, 0x9b, 0xca, 0x07, 0x4d, 0x01, 0xac, 0x64, 0x5b, 0xb6, 0x5b, 0x35, 0xeb, 0x94, 0xed, + 0xcf, 0x91, 0xb8, 0xe7, 0x97, 0x19, 0xe9, 0x06, 0x37, 0xf8, 0x03, 0x58, 0xf9, 0x2f, 0x33, 0x79, + 0xef, 0x65, 0xce, 0xc4, 0x23, 0xbf, 0x71, 0x71, 0x54, 0x04, 0x06, 0xc9, 0x7a, 0x35, 0x11, 0xda, + 0x68, 0xc0, 0xca, 0x55, 0x66, 0x85, 0x8b, 0x63, 0x73, 0x1e, 0xd5, 0xf3, 0x24, 0x5d, 0x36, 0x91, + 0x08, 0x5c, 0x50, 0xe9, 0x27, 0xf9, 0x7f, 0xcb, 0x76, 0x9b, 0x66, 0x3d, 0x67, 0xaf, 0x48, 0xc7, + 0x6a, 0xe2, 0x12, 0xa9, 0x2f, 0x16, 0x85, 0xe2, 0x5b, 0xb5, 0xba, 0x5e, 0x4c, 0xea, 0x64, 0xea, + 0x6b, 0xf6, 0xf6, 0xdc, 0x0f, 0xe1, 0x10, 0xdb, 0xc2, 0xe0, 0x58, 0xfe, 0x28, 0xe9, 0xef, 0x67, + 0x8f, 0xbf, 0xda, 0xfb, 0x64, 0xac, 0xbd, 0xc8, 0xad, 0x3d, 0x7d, 0x05, 0x00, 0x00, 0xff, 0xff, + 0x3c, 0xeb, 0xa6, 0x58, 0x64, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/reader_base.proto 
b/executor/proto/tensorflow/core/framework/reader_base.proto new file mode 100644 index 0000000000..9e187cfa79 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/reader_base.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "ReaderBaseProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; + +// For serializing and restoring the state of ReaderBase, see +// reader_base.h for details. +message ReaderBaseState { + int64 work_started = 1; + int64 work_finished = 2; + int64 num_records_produced = 3; + bytes current_work = 4; +}; diff --git a/executor/proto/tensorflow/core/framework/remote_fused_graph_execute_info.pb.go b/executor/proto/tensorflow/core/framework/remote_fused_graph_execute_info.pb.go new file mode 100644 index 0000000000..a8386510b3 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/remote_fused_graph_execute_info.pb.go @@ -0,0 +1,215 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/remote_fused_graph_execute_info.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Protocol buffer representing a handle to a tensorflow resource. Handles are +// not valid across executions, but can be serialized back and forth from within +// a single run. 
+type RemoteFusedGraphExecuteInfo struct { + // Definition of remote graph + RemoteGraph *GraphDef `protobuf:"bytes,1,opt,name=remote_graph,json=remoteGraph,proto3" json:"remote_graph,omitempty"` + // Remote fused graph input node name + GraphInputNodeName []string `protobuf:"bytes,2,rep,name=graph_input_node_name,json=graphInputNodeName,proto3" json:"graph_input_node_name,omitempty"` + // Remote fused graph output node name + GraphOutputNodeName []string `protobuf:"bytes,3,rep,name=graph_output_node_name,json=graphOutputNodeName,proto3" json:"graph_output_node_name,omitempty"` + // Executor's name + ExecutorName string `protobuf:"bytes,4,opt,name=executor_name,json=executorName,proto3" json:"executor_name,omitempty"` + // Optional: Parameters given to the executor + SerializedExecutorParameters []byte `protobuf:"bytes,5,opt,name=serialized_executor_parameters,json=serializedExecutorParameters,proto3" json:"serialized_executor_parameters,omitempty"` + // Optional: Default graph input tensor shape used to allocate memory + // before executing op + DefaultGraphInputTensorShape []*RemoteFusedGraphExecuteInfo_TensorShapeTypeProto `protobuf:"bytes,6,rep,name=default_graph_input_tensor_shape,json=defaultGraphInputTensorShape,proto3" json:"default_graph_input_tensor_shape,omitempty"` + // Optional: Default graph input tensor shape used to allocate memory + // before executing op + // TODO(satok): Remote output tensor shape once shape information is stored + // in NodeDef + DefaultGraphOutputTensorShape []*RemoteFusedGraphExecuteInfo_TensorShapeTypeProto `protobuf:"bytes,7,rep,name=default_graph_output_tensor_shape,json=defaultGraphOutputTensorShape,proto3" json:"default_graph_output_tensor_shape,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoteFusedGraphExecuteInfo) Reset() { *m = RemoteFusedGraphExecuteInfo{} } +func (m *RemoteFusedGraphExecuteInfo) String() string { return 
proto.CompactTextString(m) } +func (*RemoteFusedGraphExecuteInfo) ProtoMessage() {} +func (*RemoteFusedGraphExecuteInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c15f13da5b37f691, []int{0} +} + +func (m *RemoteFusedGraphExecuteInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoteFusedGraphExecuteInfo.Unmarshal(m, b) +} +func (m *RemoteFusedGraphExecuteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoteFusedGraphExecuteInfo.Marshal(b, m, deterministic) +} +func (m *RemoteFusedGraphExecuteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteFusedGraphExecuteInfo.Merge(m, src) +} +func (m *RemoteFusedGraphExecuteInfo) XXX_Size() int { + return xxx_messageInfo_RemoteFusedGraphExecuteInfo.Size(m) +} +func (m *RemoteFusedGraphExecuteInfo) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteFusedGraphExecuteInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteFusedGraphExecuteInfo proto.InternalMessageInfo + +func (m *RemoteFusedGraphExecuteInfo) GetRemoteGraph() *GraphDef { + if m != nil { + return m.RemoteGraph + } + return nil +} + +func (m *RemoteFusedGraphExecuteInfo) GetGraphInputNodeName() []string { + if m != nil { + return m.GraphInputNodeName + } + return nil +} + +func (m *RemoteFusedGraphExecuteInfo) GetGraphOutputNodeName() []string { + if m != nil { + return m.GraphOutputNodeName + } + return nil +} + +func (m *RemoteFusedGraphExecuteInfo) GetExecutorName() string { + if m != nil { + return m.ExecutorName + } + return "" +} + +func (m *RemoteFusedGraphExecuteInfo) GetSerializedExecutorParameters() []byte { + if m != nil { + return m.SerializedExecutorParameters + } + return nil +} + +func (m *RemoteFusedGraphExecuteInfo) GetDefaultGraphInputTensorShape() []*RemoteFusedGraphExecuteInfo_TensorShapeTypeProto { + if m != nil { + return m.DefaultGraphInputTensorShape + } + return nil +} + +func (m *RemoteFusedGraphExecuteInfo) GetDefaultGraphOutputTensorShape() 
[]*RemoteFusedGraphExecuteInfo_TensorShapeTypeProto { + if m != nil { + return m.DefaultGraphOutputTensorShape + } + return nil +} + +type RemoteFusedGraphExecuteInfo_TensorShapeTypeProto struct { + Dtype DataType `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + Shape *TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) Reset() { + *m = RemoteFusedGraphExecuteInfo_TensorShapeTypeProto{} +} +func (m *RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) String() string { + return proto.CompactTextString(m) +} +func (*RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) ProtoMessage() {} +func (*RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) Descriptor() ([]byte, []int) { + return fileDescriptor_c15f13da5b37f691, []int{0, 0} +} + +func (m *RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto.Unmarshal(m, b) +} +func (m *RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto.Marshal(b, m, deterministic) +} +func (m *RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto.Merge(m, src) +} +func (m *RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) XXX_Size() int { + return xxx_messageInfo_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto.Size(m) +} +func (m *RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteFusedGraphExecuteInfo_TensorShapeTypeProto 
proto.InternalMessageInfo + +func (m *RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) GetDtype() DataType { + if m != nil { + return m.Dtype + } + return DataType_DT_INVALID +} + +func (m *RemoteFusedGraphExecuteInfo_TensorShapeTypeProto) GetShape() *TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +func init() { + proto.RegisterType((*RemoteFusedGraphExecuteInfo)(nil), "tensorflow.RemoteFusedGraphExecuteInfo") + proto.RegisterType((*RemoteFusedGraphExecuteInfo_TensorShapeTypeProto)(nil), "tensorflow.RemoteFusedGraphExecuteInfo.TensorShapeTypeProto") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/remote_fused_graph_execute_info.proto", fileDescriptor_c15f13da5b37f691) +} + +var fileDescriptor_c15f13da5b37f691 = []byte{ + // 457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xc1, 0x6f, 0xd3, 0x30, + 0x14, 0xc6, 0xe5, 0x95, 0x0e, 0xcd, 0x2d, 0x1c, 0xcc, 0x40, 0x51, 0x29, 0x28, 0x80, 0x90, 0x22, + 0x84, 0x12, 0xd1, 0x1d, 0xb8, 0x80, 0x90, 0xa6, 0x8e, 0x69, 0x97, 0x51, 0x85, 0x9d, 0xb8, 0x58, + 0x5e, 0xf3, 0x92, 0x46, 0x34, 0x79, 0x91, 0xe3, 0x30, 0xc6, 0x89, 0x03, 0xe2, 0xff, 0xe1, 0xbf, + 0xe3, 0x88, 0x6c, 0x87, 0xd4, 0x41, 0x6b, 0x4f, 0xdc, 0x92, 0xbc, 0xdf, 0xf7, 0xfc, 0xbd, 0x2f, + 0xcf, 0xf4, 0x9d, 0x82, 0xb2, 0x46, 0x99, 0xae, 0xf1, 0x2a, 0x5a, 0xa2, 0x84, 0x28, 0x95, 0xa2, + 0x80, 0x2b, 0x94, 0x9f, 0x23, 0x09, 0x05, 0x2a, 0xe0, 0x69, 0x53, 0x43, 0xc2, 0x33, 0x29, 0xaa, + 0x15, 0x87, 0xaf, 0xb0, 0x6c, 0x14, 0xf0, 0xbc, 0x4c, 0x31, 0xac, 0x24, 0x2a, 0x64, 0x74, 0xd3, + 0x60, 0xf2, 0x7c, 0x7b, 0x33, 0xa3, 0xb7, 0x92, 0xc9, 0xcb, 0xed, 0x98, 0xad, 0xf0, 0x7a, 0x25, + 0x2a, 0x68, 0xe9, 0x1d, 0x4d, 0xd5, 0x75, 0x05, 0xb5, 0xc5, 0x9e, 0xfe, 0x1a, 0xd2, 0x87, 0xb1, + 0x71, 0xfc, 0x5e, 0x1b, 0x3e, 0xd5, 0xe7, 0x9d, 0x58, 0xbb, 0x67, 0x65, 0x8a, 0xec, 0x35, 0x1d, + 0xb7, 0x03, 0x19, 0x2b, 0x1e, 0xf1, 0x49, 0x30, 0x9a, 0x1d, 0x86, 0x9b, 0xee, 0xa1, 0xd1, 
0xcc, + 0x21, 0x8d, 0x47, 0x96, 0x34, 0xef, 0xec, 0x15, 0xbd, 0x6f, 0x87, 0xcf, 0xcb, 0xaa, 0x51, 0xbc, + 0xc4, 0x04, 0x78, 0x29, 0x0a, 0xf0, 0xf6, 0xfc, 0x41, 0x70, 0x10, 0x33, 0x53, 0x3c, 0xd3, 0xb5, + 0x73, 0x4c, 0xe0, 0x5c, 0x14, 0xc0, 0x8e, 0xe8, 0x03, 0x2b, 0xc1, 0x46, 0xf5, 0x35, 0x03, 0xa3, + 0xb9, 0x67, 0xaa, 0x1f, 0x4c, 0xb1, 0x13, 0x3d, 0xa3, 0x77, 0x6c, 0xbc, 0x28, 0x2d, 0x7b, 0xcb, + 0x27, 0xc1, 0x41, 0x3c, 0xfe, 0xfb, 0xd1, 0x40, 0x73, 0xfa, 0xb8, 0x06, 0x99, 0x8b, 0x75, 0xfe, + 0x0d, 0x12, 0xde, 0xf1, 0x95, 0xd0, 0x99, 0x28, 0x90, 0xb5, 0x37, 0xf4, 0x49, 0x30, 0x8e, 0xa7, + 0x1b, 0xea, 0xa4, 0x85, 0x16, 0x1d, 0xc3, 0x7e, 0x10, 0xea, 0x27, 0x90, 0x8a, 0x66, 0xad, 0xb8, + 0x3b, 0x9b, 0x9b, 0xbe, 0xb7, 0xef, 0x0f, 0x82, 0xd1, 0xec, 0x8d, 0x1b, 0xd0, 0x8e, 0x7c, 0xc3, + 0x0b, 0x83, 0x7d, 0xd4, 0xd2, 0x8b, 0xeb, 0x0a, 0x16, 0xfa, 0xa7, 0xc4, 0xd3, 0xf6, 0x94, 0xd3, + 0x2e, 0x23, 0x07, 0x63, 0x3f, 0x09, 0x7d, 0xd2, 0xb7, 0xd1, 0xe6, 0xd5, 0xf3, 0x71, 0xfb, 0x3f, + 0xf8, 0x78, 0xe4, 0xfa, 0xb0, 0xb9, 0x3b, 0xdc, 0xe4, 0x0b, 0x3d, 0xbc, 0x49, 0xc6, 0x5e, 0xd0, + 0x61, 0xa2, 0x77, 0xcc, 0x2c, 0xcb, 0xdd, 0xfe, 0xb2, 0xcc, 0x85, 0x12, 0x9a, 0x8c, 0x2d, 0xc2, + 0x66, 0x74, 0x68, 0xfd, 0xee, 0x99, 0xc5, 0x9a, 0xba, 0xac, 0xd3, 0xdc, 0xfa, 0xb1, 0xe8, 0xf1, + 0x77, 0x42, 0x3d, 0x94, 0x99, 0x8b, 0x76, 0xcb, 0x7d, 0xec, 0xef, 0x98, 0xd2, 0x74, 0x59, 0x90, + 0x4f, 0x6f, 0xb3, 0x5c, 0xad, 0x9a, 0xcb, 0x70, 0x89, 0x45, 0xe4, 0x5c, 0x93, 0x9b, 0x1f, 0x33, + 0xfc, 0xe7, 0xfe, 0xfc, 0x26, 0xe4, 0x72, 0xdf, 0xdc, 0x9e, 0xa3, 0x3f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x41, 0x7e, 0x4c, 0x93, 0x08, 0x04, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/remote_fused_graph_execute_info.proto b/executor/proto/tensorflow/core/framework/remote_fused_graph_execute_info.proto new file mode 100644 index 0000000000..10072724d2 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/remote_fused_graph_execute_info.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + 
+package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "RemoteFusedGraphExecuteInfoProto"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/graph.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +// Protocol buffer representing a handle to a tensorflow resource. Handles are +// not valid across executions, but can be serialized back and forth from within +// a single run. +message RemoteFusedGraphExecuteInfo { + + message TensorShapeTypeProto { + DataType dtype = 1; + TensorShapeProto shape = 2; + } + + // Definition of remote graph + GraphDef remote_graph = 1; + + // Remote fused graph input node name + repeated string graph_input_node_name = 2; + + // Remote fused graph output node name + repeated string graph_output_node_name = 3; + + // Executor's name + string executor_name = 4; + + // Optional: Parameters given to the executor + bytes serialized_executor_parameters = 5; + + // Optional: Default graph input tensor shape used to allocate memory + // before executing op + repeated TensorShapeTypeProto default_graph_input_tensor_shape = 6; + + // Optional: Default graph input tensor shape used to allocate memory + // before executing op + // TODO(satok): Remote output tensor shape once shape information is stored + // in NodeDef + repeated TensorShapeTypeProto default_graph_output_tensor_shape = 7; +}; diff --git a/executor/proto/tensorflow/core/framework/resource_handle.pb.go b/executor/proto/tensorflow/core/framework/resource_handle.pb.go new file mode 100644 index 0000000000..bb0d45c59c --- /dev/null +++ b/executor/proto/tensorflow/core/framework/resource_handle.pb.go @@ -0,0 +1,194 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow/core/framework/resource_handle.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Protocol buffer representing a handle to a tensorflow resource. Handles are +// not valid across executions, but can be serialized back and forth from within +// a single run. +type ResourceHandleProto struct { + // Unique name for the device containing the resource. + Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + // Container in which this resource is placed. + Container string `protobuf:"bytes,2,opt,name=container,proto3" json:"container,omitempty"` + // Unique name of this resource. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Hash code for the type of the resource. Is only valid in the same device + // and in the same execution. + HashCode uint64 `protobuf:"varint,4,opt,name=hash_code,json=hashCode,proto3" json:"hash_code,omitempty"` + // For debug-only, the name of the type pointed to by this handle, if + // available. + MaybeTypeName string `protobuf:"bytes,5,opt,name=maybe_type_name,json=maybeTypeName,proto3" json:"maybe_type_name,omitempty"` + // Data types and shapes for the underlying resource. 
+ DtypesAndShapes []*ResourceHandleProto_DtypeAndShape `protobuf:"bytes,6,rep,name=dtypes_and_shapes,json=dtypesAndShapes,proto3" json:"dtypes_and_shapes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceHandleProto) Reset() { *m = ResourceHandleProto{} } +func (m *ResourceHandleProto) String() string { return proto.CompactTextString(m) } +func (*ResourceHandleProto) ProtoMessage() {} +func (*ResourceHandleProto) Descriptor() ([]byte, []int) { + return fileDescriptor_a36024d2bd9a2afd, []int{0} +} + +func (m *ResourceHandleProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceHandleProto.Unmarshal(m, b) +} +func (m *ResourceHandleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceHandleProto.Marshal(b, m, deterministic) +} +func (m *ResourceHandleProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHandleProto.Merge(m, src) +} +func (m *ResourceHandleProto) XXX_Size() int { + return xxx_messageInfo_ResourceHandleProto.Size(m) +} +func (m *ResourceHandleProto) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceHandleProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHandleProto proto.InternalMessageInfo + +func (m *ResourceHandleProto) GetDevice() string { + if m != nil { + return m.Device + } + return "" +} + +func (m *ResourceHandleProto) GetContainer() string { + if m != nil { + return m.Container + } + return "" +} + +func (m *ResourceHandleProto) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ResourceHandleProto) GetHashCode() uint64 { + if m != nil { + return m.HashCode + } + return 0 +} + +func (m *ResourceHandleProto) GetMaybeTypeName() string { + if m != nil { + return m.MaybeTypeName + } + return "" +} + +func (m *ResourceHandleProto) GetDtypesAndShapes() []*ResourceHandleProto_DtypeAndShape { + if m != nil { + return m.DtypesAndShapes + } + 
return nil +} + +// Protocol buffer representing a pair of (data type, tensor shape). +type ResourceHandleProto_DtypeAndShape struct { + Dtype DataType `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + Shape *TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceHandleProto_DtypeAndShape) Reset() { *m = ResourceHandleProto_DtypeAndShape{} } +func (m *ResourceHandleProto_DtypeAndShape) String() string { return proto.CompactTextString(m) } +func (*ResourceHandleProto_DtypeAndShape) ProtoMessage() {} +func (*ResourceHandleProto_DtypeAndShape) Descriptor() ([]byte, []int) { + return fileDescriptor_a36024d2bd9a2afd, []int{0, 0} +} + +func (m *ResourceHandleProto_DtypeAndShape) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceHandleProto_DtypeAndShape.Unmarshal(m, b) +} +func (m *ResourceHandleProto_DtypeAndShape) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceHandleProto_DtypeAndShape.Marshal(b, m, deterministic) +} +func (m *ResourceHandleProto_DtypeAndShape) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHandleProto_DtypeAndShape.Merge(m, src) +} +func (m *ResourceHandleProto_DtypeAndShape) XXX_Size() int { + return xxx_messageInfo_ResourceHandleProto_DtypeAndShape.Size(m) +} +func (m *ResourceHandleProto_DtypeAndShape) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceHandleProto_DtypeAndShape.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHandleProto_DtypeAndShape proto.InternalMessageInfo + +func (m *ResourceHandleProto_DtypeAndShape) GetDtype() DataType { + if m != nil { + return m.Dtype + } + return DataType_DT_INVALID +} + +func (m *ResourceHandleProto_DtypeAndShape) GetShape() *TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +func init() { + 
proto.RegisterType((*ResourceHandleProto)(nil), "tensorflow.ResourceHandleProto") + proto.RegisterType((*ResourceHandleProto_DtypeAndShape)(nil), "tensorflow.ResourceHandleProto.DtypeAndShape") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/resource_handle.proto", fileDescriptor_a36024d2bd9a2afd) +} + +var fileDescriptor_a36024d2bd9a2afd = []byte{ + // 346 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0x4f, 0xc2, 0x40, + 0x10, 0x4d, 0xf9, 0x8a, 0x2c, 0x01, 0xe2, 0x6a, 0x4c, 0x83, 0x1c, 0x88, 0x89, 0x86, 0x18, 0x6d, + 0x93, 0x7a, 0xf6, 0x20, 0x72, 0xf0, 0x64, 0x4c, 0xe5, 0xa2, 0x97, 0x66, 0xe9, 0x0e, 0x94, 0x48, + 0x77, 0x9a, 0xdd, 0x22, 0xe1, 0x5f, 0x7b, 0xf4, 0x68, 0x3a, 0x45, 0x5b, 0x09, 0x7a, 0x9b, 0x9d, + 0x79, 0xef, 0xcd, 0x7b, 0x9d, 0x32, 0x37, 0x05, 0x65, 0x50, 0xcf, 0x96, 0xb8, 0x76, 0x43, 0xd4, + 0xe0, 0xce, 0xb4, 0x88, 0x61, 0x8d, 0xfa, 0xcd, 0xd5, 0x60, 0x70, 0xa5, 0x43, 0x08, 0x22, 0xa1, + 0xe4, 0x12, 0x9c, 0x44, 0x63, 0x8a, 0x9c, 0x15, 0x84, 0xde, 0xd5, 0xdf, 0xe4, 0x7c, 0x12, 0x98, + 0x48, 0x24, 0x5b, 0x66, 0xef, 0xfc, 0x1f, 0xf4, 0x26, 0x01, 0x93, 0xc3, 0xce, 0x3e, 0x2a, 0xec, + 0xc8, 0xdf, 0xae, 0x7e, 0xa0, 0xcd, 0x4f, 0xb4, 0xf8, 0x84, 0x35, 0x24, 0xbc, 0x2f, 0x42, 0xb0, + 0xad, 0x81, 0x35, 0x6c, 0xfa, 0xdb, 0x17, 0xef, 0xb3, 0x66, 0x88, 0x2a, 0x15, 0x0b, 0x05, 0xda, + 0xae, 0xd0, 0xa8, 0x68, 0x70, 0xce, 0x6a, 0x4a, 0xc4, 0x60, 0x57, 0x69, 0x40, 0x35, 0x3f, 0x65, + 0xcd, 0x48, 0x98, 0x28, 0x08, 0x51, 0x82, 0x5d, 0x1b, 0x58, 0xc3, 0x9a, 0x7f, 0x90, 0x35, 0xee, + 0x51, 0x02, 0xbf, 0x60, 0xdd, 0x58, 0x6c, 0xa6, 0x10, 0x64, 0x9e, 0x02, 0xe2, 0xd6, 0x89, 0xdb, + 0xa6, 0xf6, 0x64, 0x93, 0xc0, 0x63, 0x26, 0xf2, 0xc2, 0x0e, 0x25, 0xd9, 0x0e, 0x84, 0x92, 0x79, + 0x4e, 0x63, 0x37, 0x06, 0xd5, 0x61, 0xcb, 0xbb, 0x76, 0x8a, 0xa4, 0xce, 0x9e, 0x28, 0xce, 0x38, + 0x23, 0xde, 0x29, 0xf9, 0x9c, 0xb1, 0xfc, 0x6e, 0xae, 0xf3, 0xfd, 0x36, 0x3d, 0x64, 0xed, 0x5f, + 0x08, 
0x7e, 0xc9, 0xea, 0x84, 0xa1, 0xe4, 0x1d, 0xef, 0xb8, 0xac, 0x3f, 0x16, 0xa9, 0xc8, 0x4c, + 0xf9, 0x39, 0x84, 0x7b, 0xac, 0x4e, 0x66, 0xe8, 0x53, 0xb4, 0xbc, 0x7e, 0x19, 0x3b, 0xa1, 0x92, + 0x34, 0xc9, 0x88, 0x9f, 0x43, 0x47, 0x8a, 0xd9, 0xa8, 0xe7, 0x65, 0xe4, 0xcf, 0x69, 0x46, 0x9d, + 0x9d, 0x00, 0xd6, 0xeb, 0xed, 0x7c, 0x91, 0x46, 0xab, 0xa9, 0x13, 0x62, 0x5c, 0xfe, 0x7b, 0xf6, + 0x97, 0x73, 0xdc, 0xb9, 0xf5, 0xa7, 0x65, 0x4d, 0x1b, 0x74, 0xe9, 0x9b, 0xaf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xed, 0x24, 0xf6, 0x8f, 0x7d, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/resource_handle.proto b/executor/proto/tensorflow/core/framework/resource_handle.proto new file mode 100644 index 0000000000..4a03fc7589 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/resource_handle.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "ResourceHandle"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; + +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +// Protocol buffer representing a handle to a tensorflow resource. Handles are +// not valid across executions, but can be serialized back and forth from within +// a single run. +message ResourceHandleProto { + // Unique name for the device containing the resource. + string device = 1; + + // Container in which this resource is placed. + string container = 2; + + // Unique name of this resource. + string name = 3; + + // Hash code for the type of the resource. Is only valid in the same device + // and in the same execution. + uint64 hash_code = 4; + + // For debug-only, the name of the type pointed to by this handle, if + // available. + string maybe_type_name = 5; + + // Protocol buffer representing a pair of (data type, tensor shape). 
+ message DtypeAndShape { + DataType dtype = 1; + TensorShapeProto shape = 2; + } + + // Data types and shapes for the underlying resource. + repeated DtypeAndShape dtypes_and_shapes = 6; +}; diff --git a/executor/proto/tensorflow/core/framework/step_stats.pb.go b/executor/proto/tensorflow/core/framework/step_stats.pb.go new file mode 100644 index 0000000000..6fb7cd1fb0 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/step_stats.pb.go @@ -0,0 +1,632 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/step_stats.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// An allocation/de-allocation operation performed by the allocator. +type AllocationRecord struct { + // The timestamp of the operation. + AllocMicros int64 `protobuf:"varint,1,opt,name=alloc_micros,json=allocMicros,proto3" json:"alloc_micros,omitempty"` + // Number of bytes allocated, or de-allocated if negative. 
+ AllocBytes int64 `protobuf:"varint,2,opt,name=alloc_bytes,json=allocBytes,proto3" json:"alloc_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocationRecord) Reset() { *m = AllocationRecord{} } +func (m *AllocationRecord) String() string { return proto.CompactTextString(m) } +func (*AllocationRecord) ProtoMessage() {} +func (*AllocationRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_1e915309f7ed52e5, []int{0} +} + +func (m *AllocationRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocationRecord.Unmarshal(m, b) +} +func (m *AllocationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocationRecord.Marshal(b, m, deterministic) +} +func (m *AllocationRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocationRecord.Merge(m, src) +} +func (m *AllocationRecord) XXX_Size() int { + return xxx_messageInfo_AllocationRecord.Size(m) +} +func (m *AllocationRecord) XXX_DiscardUnknown() { + xxx_messageInfo_AllocationRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocationRecord proto.InternalMessageInfo + +func (m *AllocationRecord) GetAllocMicros() int64 { + if m != nil { + return m.AllocMicros + } + return 0 +} + +func (m *AllocationRecord) GetAllocBytes() int64 { + if m != nil { + return m.AllocBytes + } + return 0 +} + +type AllocatorMemoryUsed struct { + AllocatorName string `protobuf:"bytes,1,opt,name=allocator_name,json=allocatorName,proto3" json:"allocator_name,omitempty"` + // These are per-node allocator memory stats. + TotalBytes int64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` + PeakBytes int64 `protobuf:"varint,3,opt,name=peak_bytes,json=peakBytes,proto3" json:"peak_bytes,omitempty"` + // The bytes that are not deallocated. 
+ LiveBytes int64 `protobuf:"varint,4,opt,name=live_bytes,json=liveBytes,proto3" json:"live_bytes,omitempty"` + // The allocation and deallocation timeline. + AllocationRecords []*AllocationRecord `protobuf:"bytes,6,rep,name=allocation_records,json=allocationRecords,proto3" json:"allocation_records,omitempty"` + // These are snapshots of the overall allocator memory stats. + // The number of live bytes currently allocated by the allocator. + AllocatorBytesInUse int64 `protobuf:"varint,5,opt,name=allocator_bytes_in_use,json=allocatorBytesInUse,proto3" json:"allocator_bytes_in_use,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocatorMemoryUsed) Reset() { *m = AllocatorMemoryUsed{} } +func (m *AllocatorMemoryUsed) String() string { return proto.CompactTextString(m) } +func (*AllocatorMemoryUsed) ProtoMessage() {} +func (*AllocatorMemoryUsed) Descriptor() ([]byte, []int) { + return fileDescriptor_1e915309f7ed52e5, []int{1} +} + +func (m *AllocatorMemoryUsed) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocatorMemoryUsed.Unmarshal(m, b) +} +func (m *AllocatorMemoryUsed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocatorMemoryUsed.Marshal(b, m, deterministic) +} +func (m *AllocatorMemoryUsed) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatorMemoryUsed.Merge(m, src) +} +func (m *AllocatorMemoryUsed) XXX_Size() int { + return xxx_messageInfo_AllocatorMemoryUsed.Size(m) +} +func (m *AllocatorMemoryUsed) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatorMemoryUsed.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatorMemoryUsed proto.InternalMessageInfo + +func (m *AllocatorMemoryUsed) GetAllocatorName() string { + if m != nil { + return m.AllocatorName + } + return "" +} + +func (m *AllocatorMemoryUsed) GetTotalBytes() int64 { + if m != nil { + return m.TotalBytes + } + return 0 +} + +func (m *AllocatorMemoryUsed) 
GetPeakBytes() int64 { + if m != nil { + return m.PeakBytes + } + return 0 +} + +func (m *AllocatorMemoryUsed) GetLiveBytes() int64 { + if m != nil { + return m.LiveBytes + } + return 0 +} + +func (m *AllocatorMemoryUsed) GetAllocationRecords() []*AllocationRecord { + if m != nil { + return m.AllocationRecords + } + return nil +} + +func (m *AllocatorMemoryUsed) GetAllocatorBytesInUse() int64 { + if m != nil { + return m.AllocatorBytesInUse + } + return 0 +} + +// Output sizes recorded for a single execution of a graph node. +type NodeOutput struct { + Slot int32 `protobuf:"varint,1,opt,name=slot,proto3" json:"slot,omitempty"` + TensorDescription *TensorDescription `protobuf:"bytes,3,opt,name=tensor_description,json=tensorDescription,proto3" json:"tensor_description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeOutput) Reset() { *m = NodeOutput{} } +func (m *NodeOutput) String() string { return proto.CompactTextString(m) } +func (*NodeOutput) ProtoMessage() {} +func (*NodeOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_1e915309f7ed52e5, []int{2} +} + +func (m *NodeOutput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeOutput.Unmarshal(m, b) +} +func (m *NodeOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeOutput.Marshal(b, m, deterministic) +} +func (m *NodeOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeOutput.Merge(m, src) +} +func (m *NodeOutput) XXX_Size() int { + return xxx_messageInfo_NodeOutput.Size(m) +} +func (m *NodeOutput) XXX_DiscardUnknown() { + xxx_messageInfo_NodeOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeOutput proto.InternalMessageInfo + +func (m *NodeOutput) GetSlot() int32 { + if m != nil { + return m.Slot + } + return 0 +} + +func (m *NodeOutput) GetTensorDescription() *TensorDescription { + if m != nil { + return m.TensorDescription + } + return 
nil +} + +// For memory tracking. +type MemoryStats struct { + TempMemorySize int64 `protobuf:"varint,1,opt,name=temp_memory_size,json=tempMemorySize,proto3" json:"temp_memory_size,omitempty"` + PersistentMemorySize int64 `protobuf:"varint,3,opt,name=persistent_memory_size,json=persistentMemorySize,proto3" json:"persistent_memory_size,omitempty"` + PersistentTensorAllocIds []int64 `protobuf:"varint,5,rep,packed,name=persistent_tensor_alloc_ids,json=persistentTensorAllocIds,proto3" json:"persistent_tensor_alloc_ids,omitempty"` + DeviceTempMemorySize int64 `protobuf:"varint,2,opt,name=device_temp_memory_size,json=deviceTempMemorySize,proto3" json:"device_temp_memory_size,omitempty"` // Deprecated: Do not use. + DevicePersistentMemorySize int64 `protobuf:"varint,4,opt,name=device_persistent_memory_size,json=devicePersistentMemorySize,proto3" json:"device_persistent_memory_size,omitempty"` // Deprecated: Do not use. + DevicePersistentTensorAllocIds []int64 `protobuf:"varint,6,rep,packed,name=device_persistent_tensor_alloc_ids,json=devicePersistentTensorAllocIds,proto3" json:"device_persistent_tensor_alloc_ids,omitempty"` // Deprecated: Do not use. 
+ XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryStats) Reset() { *m = MemoryStats{} } +func (m *MemoryStats) String() string { return proto.CompactTextString(m) } +func (*MemoryStats) ProtoMessage() {} +func (*MemoryStats) Descriptor() ([]byte, []int) { + return fileDescriptor_1e915309f7ed52e5, []int{3} +} + +func (m *MemoryStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemoryStats.Unmarshal(m, b) +} +func (m *MemoryStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemoryStats.Marshal(b, m, deterministic) +} +func (m *MemoryStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryStats.Merge(m, src) +} +func (m *MemoryStats) XXX_Size() int { + return xxx_messageInfo_MemoryStats.Size(m) +} +func (m *MemoryStats) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryStats.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryStats proto.InternalMessageInfo + +func (m *MemoryStats) GetTempMemorySize() int64 { + if m != nil { + return m.TempMemorySize + } + return 0 +} + +func (m *MemoryStats) GetPersistentMemorySize() int64 { + if m != nil { + return m.PersistentMemorySize + } + return 0 +} + +func (m *MemoryStats) GetPersistentTensorAllocIds() []int64 { + if m != nil { + return m.PersistentTensorAllocIds + } + return nil +} + +// Deprecated: Do not use. +func (m *MemoryStats) GetDeviceTempMemorySize() int64 { + if m != nil { + return m.DeviceTempMemorySize + } + return 0 +} + +// Deprecated: Do not use. +func (m *MemoryStats) GetDevicePersistentMemorySize() int64 { + if m != nil { + return m.DevicePersistentMemorySize + } + return 0 +} + +// Deprecated: Do not use. +func (m *MemoryStats) GetDevicePersistentTensorAllocIds() []int64 { + if m != nil { + return m.DevicePersistentTensorAllocIds + } + return nil +} + +// Time/size stats recorded for a single execution of a graph node. 
+type NodeExecStats struct { + // TODO(tucker): Use some more compact form of node identity than + // the full string name. Either all processes should agree on a + // global id (cost_id?) for each node, or we should use a hash of + // the name. + NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + AllStartMicros int64 `protobuf:"varint,2,opt,name=all_start_micros,json=allStartMicros,proto3" json:"all_start_micros,omitempty"` + OpStartRelMicros int64 `protobuf:"varint,3,opt,name=op_start_rel_micros,json=opStartRelMicros,proto3" json:"op_start_rel_micros,omitempty"` + OpEndRelMicros int64 `protobuf:"varint,4,opt,name=op_end_rel_micros,json=opEndRelMicros,proto3" json:"op_end_rel_micros,omitempty"` + AllEndRelMicros int64 `protobuf:"varint,5,opt,name=all_end_rel_micros,json=allEndRelMicros,proto3" json:"all_end_rel_micros,omitempty"` + Memory []*AllocatorMemoryUsed `protobuf:"bytes,6,rep,name=memory,proto3" json:"memory,omitempty"` + Output []*NodeOutput `protobuf:"bytes,7,rep,name=output,proto3" json:"output,omitempty"` + TimelineLabel string `protobuf:"bytes,8,opt,name=timeline_label,json=timelineLabel,proto3" json:"timeline_label,omitempty"` + ScheduledMicros int64 `protobuf:"varint,9,opt,name=scheduled_micros,json=scheduledMicros,proto3" json:"scheduled_micros,omitempty"` + ThreadId uint32 `protobuf:"varint,10,opt,name=thread_id,json=threadId,proto3" json:"thread_id,omitempty"` + ReferencedTensor []*AllocationDescription `protobuf:"bytes,11,rep,name=referenced_tensor,json=referencedTensor,proto3" json:"referenced_tensor,omitempty"` + MemoryStats *MemoryStats `protobuf:"bytes,12,opt,name=memory_stats,json=memoryStats,proto3" json:"memory_stats,omitempty"` + AllStartNanos int64 `protobuf:"varint,13,opt,name=all_start_nanos,json=allStartNanos,proto3" json:"all_start_nanos,omitempty"` + OpStartRelNanos int64 `protobuf:"varint,14,opt,name=op_start_rel_nanos,json=opStartRelNanos,proto3" 
json:"op_start_rel_nanos,omitempty"` + OpEndRelNanos int64 `protobuf:"varint,15,opt,name=op_end_rel_nanos,json=opEndRelNanos,proto3" json:"op_end_rel_nanos,omitempty"` + AllEndRelNanos int64 `protobuf:"varint,16,opt,name=all_end_rel_nanos,json=allEndRelNanos,proto3" json:"all_end_rel_nanos,omitempty"` + ScheduledNanos int64 `protobuf:"varint,17,opt,name=scheduled_nanos,json=scheduledNanos,proto3" json:"scheduled_nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeExecStats) Reset() { *m = NodeExecStats{} } +func (m *NodeExecStats) String() string { return proto.CompactTextString(m) } +func (*NodeExecStats) ProtoMessage() {} +func (*NodeExecStats) Descriptor() ([]byte, []int) { + return fileDescriptor_1e915309f7ed52e5, []int{4} +} + +func (m *NodeExecStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeExecStats.Unmarshal(m, b) +} +func (m *NodeExecStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeExecStats.Marshal(b, m, deterministic) +} +func (m *NodeExecStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeExecStats.Merge(m, src) +} +func (m *NodeExecStats) XXX_Size() int { + return xxx_messageInfo_NodeExecStats.Size(m) +} +func (m *NodeExecStats) XXX_DiscardUnknown() { + xxx_messageInfo_NodeExecStats.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeExecStats proto.InternalMessageInfo + +func (m *NodeExecStats) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +func (m *NodeExecStats) GetAllStartMicros() int64 { + if m != nil { + return m.AllStartMicros + } + return 0 +} + +func (m *NodeExecStats) GetOpStartRelMicros() int64 { + if m != nil { + return m.OpStartRelMicros + } + return 0 +} + +func (m *NodeExecStats) GetOpEndRelMicros() int64 { + if m != nil { + return m.OpEndRelMicros + } + return 0 +} + +func (m *NodeExecStats) GetAllEndRelMicros() int64 { + if m != nil { + 
return m.AllEndRelMicros + } + return 0 +} + +func (m *NodeExecStats) GetMemory() []*AllocatorMemoryUsed { + if m != nil { + return m.Memory + } + return nil +} + +func (m *NodeExecStats) GetOutput() []*NodeOutput { + if m != nil { + return m.Output + } + return nil +} + +func (m *NodeExecStats) GetTimelineLabel() string { + if m != nil { + return m.TimelineLabel + } + return "" +} + +func (m *NodeExecStats) GetScheduledMicros() int64 { + if m != nil { + return m.ScheduledMicros + } + return 0 +} + +func (m *NodeExecStats) GetThreadId() uint32 { + if m != nil { + return m.ThreadId + } + return 0 +} + +func (m *NodeExecStats) GetReferencedTensor() []*AllocationDescription { + if m != nil { + return m.ReferencedTensor + } + return nil +} + +func (m *NodeExecStats) GetMemoryStats() *MemoryStats { + if m != nil { + return m.MemoryStats + } + return nil +} + +func (m *NodeExecStats) GetAllStartNanos() int64 { + if m != nil { + return m.AllStartNanos + } + return 0 +} + +func (m *NodeExecStats) GetOpStartRelNanos() int64 { + if m != nil { + return m.OpStartRelNanos + } + return 0 +} + +func (m *NodeExecStats) GetOpEndRelNanos() int64 { + if m != nil { + return m.OpEndRelNanos + } + return 0 +} + +func (m *NodeExecStats) GetAllEndRelNanos() int64 { + if m != nil { + return m.AllEndRelNanos + } + return 0 +} + +func (m *NodeExecStats) GetScheduledNanos() int64 { + if m != nil { + return m.ScheduledNanos + } + return 0 +} + +type DeviceStepStats struct { + Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + NodeStats []*NodeExecStats `protobuf:"bytes,2,rep,name=node_stats,json=nodeStats,proto3" json:"node_stats,omitempty"` + // Its key is thread id. 
+ ThreadNames map[uint32]string `protobuf:"bytes,3,rep,name=thread_names,json=threadNames,proto3" json:"thread_names,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceStepStats) Reset() { *m = DeviceStepStats{} } +func (m *DeviceStepStats) String() string { return proto.CompactTextString(m) } +func (*DeviceStepStats) ProtoMessage() {} +func (*DeviceStepStats) Descriptor() ([]byte, []int) { + return fileDescriptor_1e915309f7ed52e5, []int{5} +} + +func (m *DeviceStepStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceStepStats.Unmarshal(m, b) +} +func (m *DeviceStepStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceStepStats.Marshal(b, m, deterministic) +} +func (m *DeviceStepStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceStepStats.Merge(m, src) +} +func (m *DeviceStepStats) XXX_Size() int { + return xxx_messageInfo_DeviceStepStats.Size(m) +} +func (m *DeviceStepStats) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceStepStats.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceStepStats proto.InternalMessageInfo + +func (m *DeviceStepStats) GetDevice() string { + if m != nil { + return m.Device + } + return "" +} + +func (m *DeviceStepStats) GetNodeStats() []*NodeExecStats { + if m != nil { + return m.NodeStats + } + return nil +} + +func (m *DeviceStepStats) GetThreadNames() map[uint32]string { + if m != nil { + return m.ThreadNames + } + return nil +} + +type StepStats struct { + DevStats []*DeviceStepStats `protobuf:"bytes,1,rep,name=dev_stats,json=devStats,proto3" json:"dev_stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StepStats) Reset() { *m = StepStats{} } +func (m *StepStats) String() string { return 
proto.CompactTextString(m) } +func (*StepStats) ProtoMessage() {} +func (*StepStats) Descriptor() ([]byte, []int) { + return fileDescriptor_1e915309f7ed52e5, []int{6} +} + +func (m *StepStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StepStats.Unmarshal(m, b) +} +func (m *StepStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StepStats.Marshal(b, m, deterministic) +} +func (m *StepStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_StepStats.Merge(m, src) +} +func (m *StepStats) XXX_Size() int { + return xxx_messageInfo_StepStats.Size(m) +} +func (m *StepStats) XXX_DiscardUnknown() { + xxx_messageInfo_StepStats.DiscardUnknown(m) +} + +var xxx_messageInfo_StepStats proto.InternalMessageInfo + +func (m *StepStats) GetDevStats() []*DeviceStepStats { + if m != nil { + return m.DevStats + } + return nil +} + +func init() { + proto.RegisterType((*AllocationRecord)(nil), "tensorflow.AllocationRecord") + proto.RegisterType((*AllocatorMemoryUsed)(nil), "tensorflow.AllocatorMemoryUsed") + proto.RegisterType((*NodeOutput)(nil), "tensorflow.NodeOutput") + proto.RegisterType((*MemoryStats)(nil), "tensorflow.MemoryStats") + proto.RegisterType((*NodeExecStats)(nil), "tensorflow.NodeExecStats") + proto.RegisterType((*DeviceStepStats)(nil), "tensorflow.DeviceStepStats") + proto.RegisterMapType((map[uint32]string)(nil), "tensorflow.DeviceStepStats.ThreadNamesEntry") + proto.RegisterType((*StepStats)(nil), "tensorflow.StepStats") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/step_stats.proto", fileDescriptor_1e915309f7ed52e5) +} + +var fileDescriptor_1e915309f7ed52e5 = []byte{ + // 956 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x56, 0xdf, 0x4f, 0xe3, 0x46, + 0x10, 0x96, 0x13, 0x48, 0xc9, 0x84, 0x80, 0xb3, 0x20, 0xce, 0x85, 0xd2, 0xe3, 0x22, 0xb5, 0x17, + 0xfa, 0x23, 0x48, 0x5c, 0xd5, 0xd2, 0x93, 0xae, 0x52, 0xd1, 0xe5, 0x01, 0xf5, 0x2e, 
0x87, 0x0c, + 0xd7, 0x87, 0xbe, 0x58, 0xc6, 0x1e, 0xc0, 0xc2, 0xf6, 0x5a, 0xbb, 0x9b, 0x5c, 0xb9, 0x7f, 0xa3, + 0xcf, 0xfd, 0x2f, 0xfb, 0xd0, 0xa7, 0xaa, 0xda, 0xd9, 0x8d, 0xed, 0x24, 0x70, 0x6f, 0xeb, 0x6f, + 0xbf, 0x19, 0xcf, 0xec, 0x7c, 0xdf, 0xda, 0xf0, 0x8d, 0xc2, 0x5c, 0x72, 0x71, 0x9d, 0xf2, 0x0f, + 0x47, 0x11, 0x17, 0x78, 0x74, 0x2d, 0xc2, 0x0c, 0x3f, 0x70, 0x71, 0x77, 0x24, 0x15, 0x16, 0x81, + 0x54, 0xa1, 0x92, 0xc3, 0x42, 0x70, 0xc5, 0x19, 0x54, 0xdc, 0xdd, 0x1f, 0x1f, 0x8f, 0x0b, 0xd3, + 0x94, 0x47, 0xa1, 0x4a, 0x78, 0x1e, 0xc4, 0x28, 0x23, 0x91, 0x14, 0x7a, 0x6d, 0x72, 0xec, 0x1e, + 0x3f, 0x1e, 0x67, 0x76, 0x96, 0x63, 0xfa, 0xbf, 0x83, 0xfb, 0x6b, 0x99, 0xd3, 0xc7, 0x88, 0x8b, + 0x98, 0x3d, 0x83, 0x75, 0x7a, 0x4f, 0x90, 0x25, 0x91, 0xe0, 0xd2, 0x73, 0x0e, 0x9c, 0x41, 0xd3, + 0xef, 0x10, 0xf6, 0x96, 0x20, 0xf6, 0x14, 0xcc, 0x63, 0x70, 0x75, 0xaf, 0x50, 0x7a, 0x0d, 0x62, + 0x00, 0x41, 0xa7, 0x1a, 0xe9, 0xff, 0xdd, 0x80, 0x2d, 0x9b, 0x98, 0x8b, 0xb7, 0x98, 0x71, 0x71, + 0xff, 0x5e, 0x62, 0xcc, 0xbe, 0x82, 0x8d, 0x70, 0x06, 0x07, 0x79, 0x98, 0x21, 0x65, 0x6f, 0xfb, + 0xdd, 0x12, 0x1d, 0x87, 0x19, 0xea, 0xfc, 0x8a, 0xab, 0x30, 0x9d, 0xcf, 0x4f, 0x10, 0xe5, 0x67, + 0xfb, 0x00, 0x05, 0x86, 0x77, 0x76, 0xbf, 0x49, 0xfb, 0x6d, 0x8d, 0x94, 0xdb, 0x69, 0x32, 0x45, + 0xbb, 0xbd, 0x62, 0xb6, 0x35, 0x62, 0xb6, 0x7f, 0x03, 0x56, 0x3b, 0x49, 0x41, 0x6d, 0x4b, 0xaf, + 0x75, 0xd0, 0x1c, 0x74, 0x8e, 0xbf, 0x18, 0x56, 0xc7, 0x38, 0x5c, 0x3c, 0x1b, 0xbf, 0x17, 0x2e, + 0x20, 0x92, 0xbd, 0x80, 0x9d, 0xaa, 0x25, 0x7a, 0x61, 0x90, 0xe4, 0xc1, 0x44, 0xa2, 0xb7, 0x4a, + 0xef, 0xdd, 0x2a, 0x77, 0xe9, 0xe5, 0x67, 0xf9, 0x7b, 0x89, 0xfd, 0x1c, 0x60, 0xcc, 0x63, 0x7c, + 0x37, 0x51, 0xc5, 0x44, 0x31, 0x06, 0x2b, 0x32, 0xe5, 0x8a, 0xce, 0x62, 0xd5, 0xa7, 0x35, 0x7b, + 0x03, 0x6c, 0x79, 0x6a, 0xd4, 0x69, 0xe7, 0x78, 0xbf, 0x5e, 0xe3, 0x25, 0x2d, 0x5f, 0x57, 0x24, + 0xbf, 0xa7, 0x16, 0xa1, 0xfe, 0x7f, 0x0d, 0xe8, 0x98, 0x31, 0x5c, 0x68, 0xd5, 0xb1, 0x01, 0xb8, + 0x0a, 0xb3, 0x22, 0xc8, 
0x08, 0x0b, 0x64, 0xf2, 0x11, 0xed, 0x9c, 0x37, 0x34, 0x6e, 0xa9, 0xc9, + 0x47, 0x64, 0x3f, 0xc0, 0x4e, 0x81, 0x42, 0x26, 0x52, 0x61, 0xae, 0xe6, 0xf8, 0xe6, 0xd4, 0xb7, + 0xab, 0xdd, 0x5a, 0xd4, 0x2b, 0xd8, 0xab, 0x45, 0xd9, 0x46, 0x8c, 0x64, 0x92, 0x58, 0x7a, 0xab, + 0x07, 0xcd, 0x41, 0xd3, 0xf7, 0x2a, 0x8a, 0x69, 0x82, 0x8e, 0xfb, 0x2c, 0x96, 0xec, 0x67, 0x78, + 0x12, 0xe3, 0x34, 0x89, 0x30, 0x58, 0xaa, 0x92, 0xb4, 0x70, 0xda, 0xf0, 0x1c, 0x7f, 0xdb, 0x50, + 0x2e, 0xe7, 0xeb, 0x1d, 0xc1, 0xbe, 0x0d, 0x7d, 0xa4, 0xec, 0x95, 0x32, 0xc1, 0xae, 0x21, 0x9e, + 0x3f, 0xd4, 0xc0, 0x18, 0xfa, 0xcb, 0x69, 0x96, 0xfa, 0xd0, 0x92, 0x31, 0xb9, 0xbe, 0x5c, 0xcc, + 0x35, 0xdf, 0x51, 0xff, 0xaf, 0x16, 0x74, 0xf5, 0xc4, 0x47, 0x7f, 0x62, 0x64, 0x46, 0xb0, 0x07, + 0xed, 0x9c, 0xc7, 0x58, 0x77, 0xc1, 0x9a, 0x06, 0xc8, 0x00, 0x03, 0x70, 0xc3, 0x34, 0xd5, 0x57, + 0x84, 0x50, 0x33, 0x1f, 0x1a, 0x17, 0x68, 0xff, 0x5c, 0x68, 0xd8, 0x5a, 0xf1, 0x7b, 0xd8, 0xe2, + 0x85, 0x25, 0x0a, 0x4c, 0x67, 0x64, 0x33, 0x1c, 0x97, 0x17, 0xc4, 0xf5, 0x31, 0xb5, 0xf4, 0x43, + 0xe8, 0xf1, 0x22, 0xc0, 0x3c, 0xae, 0x93, 0x8d, 0x41, 0x36, 0x78, 0x31, 0xca, 0xe3, 0x8a, 0xfa, + 0x2d, 0xb9, 0x64, 0x91, 0x6b, 0x44, 0xbd, 0x19, 0xa6, 0xe9, 0x1c, 0xf9, 0x27, 0x68, 0x99, 0x43, + 0xb6, 0x36, 0x7a, 0xfa, 0x80, 0x8d, 0xea, 0x37, 0x81, 0x6f, 0xe9, 0x6c, 0x08, 0x2d, 0x4e, 0x2e, + 0xf0, 0x3e, 0xa3, 0xc0, 0x9d, 0x7a, 0x60, 0xe5, 0x11, 0xdf, 0xb2, 0xf4, 0x0d, 0xa2, 0x92, 0x0c, + 0xd3, 0x24, 0xc7, 0x20, 0x0d, 0xaf, 0x30, 0xf5, 0xd6, 0xcc, 0x0d, 0x32, 0x43, 0xdf, 0x68, 0x90, + 0x1d, 0x82, 0x2b, 0xa3, 0x5b, 0x8c, 0x27, 0x29, 0xc6, 0xb3, 0xd2, 0xdb, 0xa6, 0xf4, 0x12, 0xb7, + 0xa5, 0xef, 0x41, 0x5b, 0xdd, 0x0a, 0x0c, 0xe3, 0x20, 0x89, 0x3d, 0x38, 0x70, 0x06, 0x5d, 0x7f, + 0xcd, 0x00, 0x67, 0x31, 0x1b, 0x43, 0x4f, 0xe0, 0x35, 0x0a, 0xcc, 0x23, 0x8c, 0xad, 0x00, 0xbc, + 0x0e, 0x55, 0xfa, 0xec, 0xe1, 0x9b, 0xa2, 0xee, 0x44, 0xb7, 0x8a, 0x35, 0x7a, 0x60, 0x2f, 0x61, + 0x7d, 0x26, 0x46, 0xad, 0x02, 0x6f, 0x9d, 0x0c, 0xfd, 0xa4, 
0x9e, 0xaa, 0xe6, 0x53, 0xbf, 0x93, + 0xd5, 0x4c, 0xfb, 0x35, 0x6c, 0x56, 0xa2, 0xc8, 0xc3, 0x9c, 0x4b, 0xaf, 0x4b, 0x2d, 0x75, 0x67, + 0x9a, 0x18, 0x6b, 0x50, 0x0f, 0x6e, 0x4e, 0x12, 0x86, 0xba, 0x61, 0xba, 0xaf, 0x14, 0x61, 0xc8, + 0xcf, 0xc1, 0xad, 0x09, 0xc2, 0x50, 0x37, 0x4d, 0xd6, 0x99, 0x1e, 0x0c, 0xf1, 0x10, 0x7a, 0x75, + 0x39, 0x18, 0xa6, 0x5b, 0x6a, 0xb2, 0x4e, 0x7d, 0x0e, 0xd5, 0x21, 0x5b, 0x62, 0xcf, 0x10, 0x4b, + 0x98, 0x88, 0xfd, 0x7f, 0x1c, 0xd8, 0x7c, 0x4d, 0xc6, 0xb9, 0x50, 0x58, 0x98, 0x2e, 0x77, 0xa0, + 0x65, 0xbc, 0x64, 0x4d, 0x61, 0x9f, 0xd8, 0x09, 0x00, 0xf9, 0xc5, 0x9c, 0x5b, 0x83, 0x46, 0xf0, + 0xf9, 0xa2, 0x58, 0x4a, 0x7b, 0xf9, 0x64, 0x2e, 0x93, 0xf1, 0x1d, 0xac, 0xdb, 0x01, 0x6b, 0xaf, + 0x69, 0x6f, 0xe8, 0xd8, 0xef, 0xea, 0xb1, 0x0b, 0x45, 0x0c, 0x2f, 0x89, 0xaf, 0xad, 0x28, 0x47, + 0xb9, 0x12, 0xf7, 0x7e, 0x47, 0x55, 0xc8, 0xee, 0x2f, 0xe0, 0x2e, 0x12, 0x98, 0x0b, 0xcd, 0x3b, + 0xbc, 0xa7, 0x9a, 0xbb, 0xbe, 0x5e, 0xb2, 0x6d, 0x58, 0x9d, 0x86, 0xe9, 0xc4, 0x5c, 0x59, 0x6d, + 0xdf, 0x3c, 0xbc, 0x6c, 0x9c, 0x38, 0xfd, 0x11, 0xb4, 0xab, 0x7e, 0x4f, 0xa0, 0x1d, 0xe3, 0xd4, + 0xb6, 0xe5, 0x50, 0x69, 0x7b, 0x9f, 0x28, 0xcd, 0x5f, 0x8b, 0x71, 0x4a, 0xab, 0x53, 0x0e, 0x1e, + 0x17, 0x37, 0x75, 0x6e, 0xf9, 0xc5, 0x3f, 0xdd, 0x2c, 0x03, 0xce, 0xf5, 0x87, 0x5e, 0x9e, 0x3b, + 0x7f, 0xbc, 0xba, 0x49, 0xd4, 0xed, 0xe4, 0x6a, 0x18, 0xf1, 0xec, 0xa8, 0xf6, 0xab, 0xf0, 0xf0, + 0xf2, 0x86, 0x2f, 0xfc, 0x43, 0xfc, 0xeb, 0x38, 0x57, 0x2d, 0xfa, 0x69, 0x78, 0xf1, 0x7f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xf2, 0x7d, 0x24, 0x1c, 0xda, 0x08, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/step_stats.proto b/executor/proto/tensorflow/core/framework/step_stats.proto new file mode 100644 index 0000000000..f8cab135ab --- /dev/null +++ b/executor/proto/tensorflow/core/framework/step_stats.proto @@ -0,0 +1,86 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "StepStatsProtos"; +option 
java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/allocation_description.proto"; +import "tensorflow/core/framework/tensor_description.proto"; + +// An allocation/de-allocation operation performed by the allocator. +message AllocationRecord { + // The timestamp of the operation. + int64 alloc_micros = 1; + // Number of bytes allocated, or de-allocated if negative. + int64 alloc_bytes = 2; +} + +message AllocatorMemoryUsed { + string allocator_name = 1; + // These are per-node allocator memory stats. + int64 total_bytes = 2; + int64 peak_bytes = 3; + // The bytes that are not deallocated. + int64 live_bytes = 4; + // The allocation and deallocation timeline. + repeated AllocationRecord allocation_records = 6; + + // These are snapshots of the overall allocator memory stats. + // The number of live bytes currently allocated by the allocator. + int64 allocator_bytes_in_use = 5; +} + +// Output sizes recorded for a single execution of a graph node. +message NodeOutput { + int32 slot = 1; + TensorDescription tensor_description = 3; +}; + +// For memory tracking. +message MemoryStats { + int64 temp_memory_size = 1; + int64 persistent_memory_size = 3; + repeated int64 persistent_tensor_alloc_ids = 5; + + int64 device_temp_memory_size = 2 [deprecated = true]; + int64 device_persistent_memory_size = 4 [deprecated = true]; + repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true]; +} + +// Time/size stats recorded for a single execution of a graph node. +message NodeExecStats { + // TODO(tucker): Use some more compact form of node identity than + // the full string name. Either all processes should agree on a + // global id (cost_id?) for each node, or we should use a hash of + // the name. 
+ string node_name = 1; + int64 all_start_micros = 2; + int64 op_start_rel_micros = 3; + int64 op_end_rel_micros = 4; + int64 all_end_rel_micros = 5; + repeated AllocatorMemoryUsed memory = 6; + repeated NodeOutput output = 7; + string timeline_label = 8; + int64 scheduled_micros = 9; + uint32 thread_id = 10; + repeated AllocationDescription referenced_tensor = 11; + MemoryStats memory_stats = 12; + int64 all_start_nanos = 13; + int64 op_start_rel_nanos = 14; + int64 op_end_rel_nanos = 15; + int64 all_end_rel_nanos = 16; + int64 scheduled_nanos = 17; +}; + +message DeviceStepStats { + string device = 1; + repeated NodeExecStats node_stats = 2; + // Its key is thread id. + map thread_names = 3; +} + +message StepStats { + repeated DeviceStepStats dev_stats = 1; +}; diff --git a/executor/proto/tensorflow/core/framework/summary.pb.go b/executor/proto/tensorflow/core/framework/summary.pb.go new file mode 100644 index 0000000000..706eb91b04 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/summary.pb.go @@ -0,0 +1,705 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/summary.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Metadata associated with a series of Summary data +type SummaryDescription struct { + // Hint on how plugins should process the data in this series. 
+ // Supported values include "scalar", "histogram", "image", "audio" + TypeHint string `protobuf:"bytes,1,opt,name=type_hint,json=typeHint,proto3" json:"type_hint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryDescription) Reset() { *m = SummaryDescription{} } +func (m *SummaryDescription) String() string { return proto.CompactTextString(m) } +func (*SummaryDescription) ProtoMessage() {} +func (*SummaryDescription) Descriptor() ([]byte, []int) { + return fileDescriptor_80d4b41d3e8d8b09, []int{0} +} + +func (m *SummaryDescription) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryDescription.Unmarshal(m, b) +} +func (m *SummaryDescription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryDescription.Marshal(b, m, deterministic) +} +func (m *SummaryDescription) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryDescription.Merge(m, src) +} +func (m *SummaryDescription) XXX_Size() int { + return xxx_messageInfo_SummaryDescription.Size(m) +} +func (m *SummaryDescription) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryDescription.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryDescription proto.InternalMessageInfo + +func (m *SummaryDescription) GetTypeHint() string { + if m != nil { + return m.TypeHint + } + return "" +} + +// Serialization format for histogram module in +// core/lib/histogram/histogram.h +type HistogramProto struct { + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` + Num float64 `protobuf:"fixed64,3,opt,name=num,proto3" json:"num,omitempty"` + Sum float64 `protobuf:"fixed64,4,opt,name=sum,proto3" json:"sum,omitempty"` + SumSquares float64 `protobuf:"fixed64,5,opt,name=sum_squares,json=sumSquares,proto3" json:"sum_squares,omitempty"` + // Parallel arrays encoding the bucket 
boundaries and the bucket values. + // bucket(i) is the count for the bucket i. The range for + // a bucket is: + // i == 0: -DBL_MAX .. bucket_limit(0) + // i != 0: bucket_limit(i-1) .. bucket_limit(i) + BucketLimit []float64 `protobuf:"fixed64,6,rep,packed,name=bucket_limit,json=bucketLimit,proto3" json:"bucket_limit,omitempty"` + Bucket []float64 `protobuf:"fixed64,7,rep,packed,name=bucket,proto3" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HistogramProto) Reset() { *m = HistogramProto{} } +func (m *HistogramProto) String() string { return proto.CompactTextString(m) } +func (*HistogramProto) ProtoMessage() {} +func (*HistogramProto) Descriptor() ([]byte, []int) { + return fileDescriptor_80d4b41d3e8d8b09, []int{1} +} + +func (m *HistogramProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HistogramProto.Unmarshal(m, b) +} +func (m *HistogramProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HistogramProto.Marshal(b, m, deterministic) +} +func (m *HistogramProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_HistogramProto.Merge(m, src) +} +func (m *HistogramProto) XXX_Size() int { + return xxx_messageInfo_HistogramProto.Size(m) +} +func (m *HistogramProto) XXX_DiscardUnknown() { + xxx_messageInfo_HistogramProto.DiscardUnknown(m) +} + +var xxx_messageInfo_HistogramProto proto.InternalMessageInfo + +func (m *HistogramProto) GetMin() float64 { + if m != nil { + return m.Min + } + return 0 +} + +func (m *HistogramProto) GetMax() float64 { + if m != nil { + return m.Max + } + return 0 +} + +func (m *HistogramProto) GetNum() float64 { + if m != nil { + return m.Num + } + return 0 +} + +func (m *HistogramProto) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *HistogramProto) GetSumSquares() float64 { + if m != nil { + return m.SumSquares + } + return 0 +} + +func (m 
*HistogramProto) GetBucketLimit() []float64 { + if m != nil { + return m.BucketLimit + } + return nil +} + +func (m *HistogramProto) GetBucket() []float64 { + if m != nil { + return m.Bucket + } + return nil +} + +// A SummaryMetadata encapsulates information on which plugins are able to make +// use of a certain summary value. +type SummaryMetadata struct { + // Data that associates a summary with a certain plugin. + PluginData *SummaryMetadata_PluginData `protobuf:"bytes,1,opt,name=plugin_data,json=pluginData,proto3" json:"plugin_data,omitempty"` + // Display name for viewing in TensorBoard. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Longform readable description of the summary sequence. Markdown supported. + SummaryDescription string `protobuf:"bytes,3,opt,name=summary_description,json=summaryDescription,proto3" json:"summary_description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryMetadata) Reset() { *m = SummaryMetadata{} } +func (m *SummaryMetadata) String() string { return proto.CompactTextString(m) } +func (*SummaryMetadata) ProtoMessage() {} +func (*SummaryMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_80d4b41d3e8d8b09, []int{2} +} + +func (m *SummaryMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryMetadata.Unmarshal(m, b) +} +func (m *SummaryMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryMetadata.Marshal(b, m, deterministic) +} +func (m *SummaryMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryMetadata.Merge(m, src) +} +func (m *SummaryMetadata) XXX_Size() int { + return xxx_messageInfo_SummaryMetadata.Size(m) +} +func (m *SummaryMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryMetadata 
proto.InternalMessageInfo + +func (m *SummaryMetadata) GetPluginData() *SummaryMetadata_PluginData { + if m != nil { + return m.PluginData + } + return nil +} + +func (m *SummaryMetadata) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *SummaryMetadata) GetSummaryDescription() string { + if m != nil { + return m.SummaryDescription + } + return "" +} + +type SummaryMetadata_PluginData struct { + // The name of the plugin this data pertains to. + PluginName string `protobuf:"bytes,1,opt,name=plugin_name,json=pluginName,proto3" json:"plugin_name,omitempty"` + // The content to store for the plugin. The best practice is for this to be + // a binary serialized protocol buffer. + Content []byte `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryMetadata_PluginData) Reset() { *m = SummaryMetadata_PluginData{} } +func (m *SummaryMetadata_PluginData) String() string { return proto.CompactTextString(m) } +func (*SummaryMetadata_PluginData) ProtoMessage() {} +func (*SummaryMetadata_PluginData) Descriptor() ([]byte, []int) { + return fileDescriptor_80d4b41d3e8d8b09, []int{2, 0} +} + +func (m *SummaryMetadata_PluginData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryMetadata_PluginData.Unmarshal(m, b) +} +func (m *SummaryMetadata_PluginData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryMetadata_PluginData.Marshal(b, m, deterministic) +} +func (m *SummaryMetadata_PluginData) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryMetadata_PluginData.Merge(m, src) +} +func (m *SummaryMetadata_PluginData) XXX_Size() int { + return xxx_messageInfo_SummaryMetadata_PluginData.Size(m) +} +func (m *SummaryMetadata_PluginData) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryMetadata_PluginData.DiscardUnknown(m) +} + +var 
xxx_messageInfo_SummaryMetadata_PluginData proto.InternalMessageInfo + +func (m *SummaryMetadata_PluginData) GetPluginName() string { + if m != nil { + return m.PluginName + } + return "" +} + +func (m *SummaryMetadata_PluginData) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +// A Summary is a set of named values to be displayed by the +// visualizer. +// +// Summaries are produced regularly during training, as controlled by +// the "summary_interval_secs" attribute of the training operation. +// Summaries are also produced at the end of an evaluation. +type Summary struct { + // Set of values for the summary. + Value []*Summary_Value `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_80d4b41d3e8d8b09, []int{3} +} + +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetValue() []*Summary_Value { + if m != nil { + return m.Value + } + return nil +} + +type Summary_Image struct { + // Dimensions of the image. 
+ Height int32 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Width int32 `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"` + // Valid colorspace values are + // 1 - grayscale + // 2 - grayscale + alpha + // 3 - RGB + // 4 - RGBA + // 5 - DIGITAL_YUV + // 6 - BGRA + Colorspace int32 `protobuf:"varint,3,opt,name=colorspace,proto3" json:"colorspace,omitempty"` + // Image data in encoded format. All image formats supported by + // image_codec::CoderUtil can be stored here. + EncodedImageString []byte `protobuf:"bytes,4,opt,name=encoded_image_string,json=encodedImageString,proto3" json:"encoded_image_string,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary_Image) Reset() { *m = Summary_Image{} } +func (m *Summary_Image) String() string { return proto.CompactTextString(m) } +func (*Summary_Image) ProtoMessage() {} +func (*Summary_Image) Descriptor() ([]byte, []int) { + return fileDescriptor_80d4b41d3e8d8b09, []int{3, 0} +} + +func (m *Summary_Image) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary_Image.Unmarshal(m, b) +} +func (m *Summary_Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary_Image.Marshal(b, m, deterministic) +} +func (m *Summary_Image) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary_Image.Merge(m, src) +} +func (m *Summary_Image) XXX_Size() int { + return xxx_messageInfo_Summary_Image.Size(m) +} +func (m *Summary_Image) XXX_DiscardUnknown() { + xxx_messageInfo_Summary_Image.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary_Image proto.InternalMessageInfo + +func (m *Summary_Image) GetHeight() int32 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Summary_Image) GetWidth() int32 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *Summary_Image) GetColorspace() int32 { + if m != nil { + return m.Colorspace + } + 
return 0 +} + +func (m *Summary_Image) GetEncodedImageString() []byte { + if m != nil { + return m.EncodedImageString + } + return nil +} + +type Summary_Audio struct { + // Sample rate of the audio in Hz. + SampleRate float32 `protobuf:"fixed32,1,opt,name=sample_rate,json=sampleRate,proto3" json:"sample_rate,omitempty"` + // Number of channels of audio. + NumChannels int64 `protobuf:"varint,2,opt,name=num_channels,json=numChannels,proto3" json:"num_channels,omitempty"` + // Length of the audio in frames (samples per channel). + LengthFrames int64 `protobuf:"varint,3,opt,name=length_frames,json=lengthFrames,proto3" json:"length_frames,omitempty"` + // Encoded audio data and its associated RFC 2045 content type (e.g. + // "audio/wav"). + EncodedAudioString []byte `protobuf:"bytes,4,opt,name=encoded_audio_string,json=encodedAudioString,proto3" json:"encoded_audio_string,omitempty"` + ContentType string `protobuf:"bytes,5,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary_Audio) Reset() { *m = Summary_Audio{} } +func (m *Summary_Audio) String() string { return proto.CompactTextString(m) } +func (*Summary_Audio) ProtoMessage() {} +func (*Summary_Audio) Descriptor() ([]byte, []int) { + return fileDescriptor_80d4b41d3e8d8b09, []int{3, 1} +} + +func (m *Summary_Audio) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary_Audio.Unmarshal(m, b) +} +func (m *Summary_Audio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary_Audio.Marshal(b, m, deterministic) +} +func (m *Summary_Audio) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary_Audio.Merge(m, src) +} +func (m *Summary_Audio) XXX_Size() int { + return xxx_messageInfo_Summary_Audio.Size(m) +} +func (m *Summary_Audio) XXX_DiscardUnknown() { + xxx_messageInfo_Summary_Audio.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Summary_Audio proto.InternalMessageInfo + +func (m *Summary_Audio) GetSampleRate() float32 { + if m != nil { + return m.SampleRate + } + return 0 +} + +func (m *Summary_Audio) GetNumChannels() int64 { + if m != nil { + return m.NumChannels + } + return 0 +} + +func (m *Summary_Audio) GetLengthFrames() int64 { + if m != nil { + return m.LengthFrames + } + return 0 +} + +func (m *Summary_Audio) GetEncodedAudioString() []byte { + if m != nil { + return m.EncodedAudioString + } + return nil +} + +func (m *Summary_Audio) GetContentType() string { + if m != nil { + return m.ContentType + } + return "" +} + +type Summary_Value struct { + // This field is deprecated and will not be set. + NodeName string `protobuf:"bytes,7,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // Tag name for the data. Used by TensorBoard plugins to organize data. Tags + // are often organized by scope (which contains slashes to convey + // hierarchy). For example: foo/bar/0 + Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` + // Contains metadata on the summary value such as which plugins may use it. + // Take note that many summary values may lack a metadata field. This is + // because the FileWriter only keeps a metadata object on the first summary + // value with a certain tag for each tag. TensorBoard then remembers which + // tags are associated with which plugins. This saves space. + Metadata *SummaryMetadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Value associated with the tag. 
+ // + // Types that are valid to be assigned to Value: + // *Summary_Value_SimpleValue + // *Summary_Value_ObsoleteOldStyleHistogram + // *Summary_Value_Image + // *Summary_Value_Histo + // *Summary_Value_Audio + // *Summary_Value_Tensor + Value isSummary_Value_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary_Value) Reset() { *m = Summary_Value{} } +func (m *Summary_Value) String() string { return proto.CompactTextString(m) } +func (*Summary_Value) ProtoMessage() {} +func (*Summary_Value) Descriptor() ([]byte, []int) { + return fileDescriptor_80d4b41d3e8d8b09, []int{3, 2} +} + +func (m *Summary_Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary_Value.Unmarshal(m, b) +} +func (m *Summary_Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary_Value.Marshal(b, m, deterministic) +} +func (m *Summary_Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary_Value.Merge(m, src) +} +func (m *Summary_Value) XXX_Size() int { + return xxx_messageInfo_Summary_Value.Size(m) +} +func (m *Summary_Value) XXX_DiscardUnknown() { + xxx_messageInfo_Summary_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary_Value proto.InternalMessageInfo + +func (m *Summary_Value) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +func (m *Summary_Value) GetTag() string { + if m != nil { + return m.Tag + } + return "" +} + +func (m *Summary_Value) GetMetadata() *SummaryMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +type isSummary_Value_Value interface { + isSummary_Value_Value() +} + +type Summary_Value_SimpleValue struct { + SimpleValue float32 `protobuf:"fixed32,2,opt,name=simple_value,json=simpleValue,proto3,oneof"` +} + +type Summary_Value_ObsoleteOldStyleHistogram struct { + ObsoleteOldStyleHistogram []byte 
`protobuf:"bytes,3,opt,name=obsolete_old_style_histogram,json=obsoleteOldStyleHistogram,proto3,oneof"` +} + +type Summary_Value_Image struct { + Image *Summary_Image `protobuf:"bytes,4,opt,name=image,proto3,oneof"` +} + +type Summary_Value_Histo struct { + Histo *HistogramProto `protobuf:"bytes,5,opt,name=histo,proto3,oneof"` +} + +type Summary_Value_Audio struct { + Audio *Summary_Audio `protobuf:"bytes,6,opt,name=audio,proto3,oneof"` +} + +type Summary_Value_Tensor struct { + Tensor *TensorProto `protobuf:"bytes,8,opt,name=tensor,proto3,oneof"` +} + +func (*Summary_Value_SimpleValue) isSummary_Value_Value() {} + +func (*Summary_Value_ObsoleteOldStyleHistogram) isSummary_Value_Value() {} + +func (*Summary_Value_Image) isSummary_Value_Value() {} + +func (*Summary_Value_Histo) isSummary_Value_Value() {} + +func (*Summary_Value_Audio) isSummary_Value_Value() {} + +func (*Summary_Value_Tensor) isSummary_Value_Value() {} + +func (m *Summary_Value) GetValue() isSummary_Value_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Summary_Value) GetSimpleValue() float32 { + if x, ok := m.GetValue().(*Summary_Value_SimpleValue); ok { + return x.SimpleValue + } + return 0 +} + +func (m *Summary_Value) GetObsoleteOldStyleHistogram() []byte { + if x, ok := m.GetValue().(*Summary_Value_ObsoleteOldStyleHistogram); ok { + return x.ObsoleteOldStyleHistogram + } + return nil +} + +func (m *Summary_Value) GetImage() *Summary_Image { + if x, ok := m.GetValue().(*Summary_Value_Image); ok { + return x.Image + } + return nil +} + +func (m *Summary_Value) GetHisto() *HistogramProto { + if x, ok := m.GetValue().(*Summary_Value_Histo); ok { + return x.Histo + } + return nil +} + +func (m *Summary_Value) GetAudio() *Summary_Audio { + if x, ok := m.GetValue().(*Summary_Value_Audio); ok { + return x.Audio + } + return nil +} + +func (m *Summary_Value) GetTensor() *TensorProto { + if x, ok := m.GetValue().(*Summary_Value_Tensor); ok { + return x.Tensor + } + return nil +} + 
+// XXX_OneofWrappers is for the internal use of the proto package. +func (*Summary_Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Summary_Value_SimpleValue)(nil), + (*Summary_Value_ObsoleteOldStyleHistogram)(nil), + (*Summary_Value_Image)(nil), + (*Summary_Value_Histo)(nil), + (*Summary_Value_Audio)(nil), + (*Summary_Value_Tensor)(nil), + } +} + +func init() { + proto.RegisterType((*SummaryDescription)(nil), "tensorflow.SummaryDescription") + proto.RegisterType((*HistogramProto)(nil), "tensorflow.HistogramProto") + proto.RegisterType((*SummaryMetadata)(nil), "tensorflow.SummaryMetadata") + proto.RegisterType((*SummaryMetadata_PluginData)(nil), "tensorflow.SummaryMetadata.PluginData") + proto.RegisterType((*Summary)(nil), "tensorflow.Summary") + proto.RegisterType((*Summary_Image)(nil), "tensorflow.Summary.Image") + proto.RegisterType((*Summary_Audio)(nil), "tensorflow.Summary.Audio") + proto.RegisterType((*Summary_Value)(nil), "tensorflow.Summary.Value") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/summary.proto", fileDescriptor_80d4b41d3e8d8b09) +} + +var fileDescriptor_80d4b41d3e8d8b09 = []byte{ + // 771 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xcd, 0x72, 0x23, 0x35, + 0x10, 0xce, 0x78, 0x18, 0x3b, 0xee, 0xf1, 0x42, 0x4a, 0x6c, 0xc1, 0xac, 0x97, 0x82, 0xc5, 0x5b, + 0x2c, 0x39, 0xd9, 0xc4, 0x1c, 0x38, 0x71, 0x58, 0xb3, 0xc5, 0x9a, 0x2a, 0x7e, 0x52, 0x72, 0x8a, + 0x03, 0x97, 0x29, 0x79, 0x46, 0x19, 0xab, 0x32, 0x92, 0x86, 0x91, 0x86, 0xc4, 0x4f, 0xc0, 0xd3, + 0xf0, 0x06, 0xb9, 0xf2, 0x4c, 0x70, 0xa4, 0xd4, 0x52, 0x6c, 0x13, 0x12, 0x6e, 0xd2, 0xa7, 0xaf, + 0xf5, 0xb5, 0xfa, 0x6b, 0x35, 0x7c, 0x6e, 0xb9, 0x32, 0xba, 0xbd, 0xac, 0xf5, 0xf5, 0xac, 0xd0, + 0x2d, 0x9f, 0x5d, 0xb6, 0x4c, 0xf2, 0x6b, 0xdd, 0x5e, 0xcd, 0x4c, 0x27, 0x25, 0x6b, 0xb7, 0xd3, + 0xa6, 0xd5, 0x56, 0x13, 0xd8, 0x13, 0xc7, 0xaf, 0x1e, 0x0f, 0xf2, 0x27, 0x3e, 0x66, 0x72, 0x06, + 
0x64, 0xe5, 0x2f, 0x79, 0xc3, 0x4d, 0xd1, 0x8a, 0xc6, 0x0a, 0xad, 0xc8, 0x73, 0x18, 0xda, 0x6d, + 0xc3, 0xf3, 0x8d, 0x50, 0x36, 0x8b, 0x5e, 0x44, 0xa7, 0x43, 0x7a, 0xec, 0x80, 0xa5, 0x50, 0x76, + 0x72, 0x1b, 0xc1, 0xbb, 0x4b, 0x61, 0xac, 0xae, 0x5a, 0x26, 0xcf, 0x51, 0xf9, 0x04, 0x62, 0x29, + 0x14, 0x32, 0x23, 0xea, 0x96, 0x88, 0xb0, 0x9b, 0xac, 0x17, 0x10, 0x76, 0xe3, 0x10, 0xd5, 0xc9, + 0x2c, 0xf6, 0x88, 0xea, 0xa4, 0x43, 0x4c, 0x27, 0xb3, 0x77, 0x3c, 0x62, 0x3a, 0x49, 0x3e, 0x81, + 0xd4, 0x74, 0x32, 0x37, 0xbf, 0x76, 0xac, 0xe5, 0x26, 0x4b, 0xf0, 0x04, 0x4c, 0x27, 0x57, 0x1e, + 0x21, 0x9f, 0xc1, 0x68, 0xdd, 0x15, 0x57, 0xdc, 0xe6, 0xb5, 0x90, 0xc2, 0x66, 0xfd, 0x17, 0xf1, + 0x69, 0xb4, 0xe8, 0x9d, 0x44, 0x34, 0xf5, 0xf8, 0xf7, 0x0e, 0x26, 0x63, 0xe8, 0xfb, 0x6d, 0x36, + 0xd8, 0x11, 0x02, 0x32, 0xf9, 0x2b, 0x82, 0xf7, 0xc2, 0x93, 0x7f, 0xe0, 0x96, 0x95, 0xcc, 0x32, + 0xf2, 0x16, 0xd2, 0xa6, 0xee, 0x2a, 0xa1, 0x72, 0xb7, 0xc5, 0x77, 0xa4, 0xf3, 0x57, 0xd3, 0x7d, + 0x0d, 0xa7, 0xf7, 0x22, 0xa6, 0xe7, 0x48, 0x7f, 0xc3, 0x2c, 0xa3, 0xd0, 0xec, 0xd6, 0xe4, 0x53, + 0x18, 0x95, 0xc2, 0x34, 0x35, 0xdb, 0xe6, 0x8a, 0x49, 0x8e, 0xef, 0x1f, 0xd2, 0x34, 0x60, 0x3f, + 0x32, 0xc9, 0xc9, 0x0c, 0xde, 0x0f, 0xb6, 0xe5, 0xe5, 0xbe, 0xe4, 0x58, 0x97, 0x21, 0x25, 0xe6, + 0x3f, 0x66, 0x8c, 0xdf, 0x02, 0xec, 0xd5, 0x5c, 0x89, 0x42, 0xaa, 0x28, 0xe0, 0xcd, 0x09, 0x29, + 0xe0, 0xfd, 0x19, 0x0c, 0x0a, 0xad, 0x2c, 0x57, 0x16, 0xd5, 0x47, 0xf4, 0x6e, 0x3b, 0xb9, 0xed, + 0xc3, 0x20, 0xbc, 0x83, 0xcc, 0x20, 0xf9, 0x8d, 0xd5, 0x9d, 0xbb, 0x20, 0x3e, 0x4d, 0xe7, 0xcf, + 0x1e, 0x78, 0xeb, 0xf4, 0x67, 0x47, 0xa0, 0x9e, 0x37, 0xfe, 0x3d, 0x82, 0xe4, 0x3b, 0xc9, 0x2a, + 0x4e, 0x3e, 0x80, 0xfe, 0x86, 0x8b, 0x6a, 0xe3, 0x3b, 0x23, 0xa1, 0x61, 0x47, 0x9e, 0x42, 0x72, + 0x2d, 0x4a, 0xbb, 0x41, 0xd9, 0x84, 0xfa, 0x0d, 0xf9, 0x18, 0xa0, 0xd0, 0xb5, 0x6e, 0x4d, 0xc3, + 0x0a, 0x8e, 0xaf, 0x4c, 0xe8, 0x01, 0x42, 0xbe, 0x80, 0xa7, 0x5c, 0x15, 0xba, 0xe4, 0x65, 0x2e, + 0xdc, 0xf5, 0xb9, 0xb1, 0xad, 0x50, 
0x15, 0x76, 0xc5, 0x88, 0x92, 0x70, 0x86, 0xca, 0x2b, 0x3c, + 0x19, 0xff, 0x19, 0x41, 0xf2, 0xba, 0x2b, 0x85, 0xc6, 0x76, 0x61, 0xb2, 0xa9, 0x79, 0xde, 0x32, + 0xeb, 0x6b, 0xd1, 0xa3, 0xe0, 0x21, 0xca, 0x2c, 0x77, 0x76, 0xa8, 0x4e, 0xe6, 0xc5, 0x86, 0x29, + 0xc5, 0x6b, 0x83, 0x99, 0xc5, 0x34, 0x55, 0x9d, 0xfc, 0x26, 0x40, 0xe4, 0x25, 0x3c, 0xa9, 0xb9, + 0xaa, 0xec, 0x26, 0xc7, 0x1f, 0x62, 0x30, 0xc5, 0x98, 0x8e, 0x3c, 0xf8, 0x2d, 0x62, 0x87, 0x49, + 0x32, 0xa7, 0xfc, 0x70, 0x92, 0x98, 0x94, 0x4f, 0xd2, 0x29, 0x87, 0xb2, 0xe7, 0xee, 0xe3, 0x60, + 0x2b, 0x0f, 0x69, 0x1a, 0xb0, 0x8b, 0x6d, 0xc3, 0xc7, 0x7f, 0xc4, 0x90, 0x60, 0x89, 0xdd, 0x77, + 0x53, 0xba, 0xe4, 0xde, 0xd1, 0x81, 0xff, 0x6e, 0x0e, 0x40, 0x3f, 0x4f, 0x20, 0xb6, 0xac, 0x0a, + 0x46, 0xbb, 0x25, 0xf9, 0x0a, 0x8e, 0x65, 0xe8, 0xc3, 0x6c, 0x88, 0xad, 0xfa, 0xfc, 0x7f, 0x5a, + 0x95, 0xee, 0xc8, 0xe4, 0x25, 0x8c, 0x8c, 0xc0, 0x7a, 0x79, 0xef, 0x5d, 0x39, 0x7a, 0xcb, 0x23, + 0x9a, 0x7a, 0xd4, 0x27, 0xf3, 0x1a, 0x3e, 0xd2, 0x6b, 0xa3, 0x6b, 0x6e, 0x79, 0xae, 0xeb, 0x32, + 0x37, 0x76, 0x5b, 0xbb, 0x49, 0x10, 0x3e, 0x3c, 0xd6, 0x67, 0xb4, 0x3c, 0xa2, 0xcf, 0xee, 0x58, + 0x3f, 0xd5, 0xe5, 0xca, 0x71, 0x76, 0x33, 0x81, 0x9c, 0x41, 0x82, 0x5e, 0x62, 0x7d, 0x1e, 0x69, + 0x2e, 0x74, 0x74, 0x79, 0x44, 0x3d, 0x93, 0xcc, 0x21, 0x41, 0x09, 0x2c, 0x54, 0x3a, 0x1f, 0x1f, + 0x86, 0xfc, 0x7b, 0xd8, 0xb8, 0x18, 0xa4, 0x3a, 0x19, 0x74, 0x23, 0xeb, 0x3f, 0x2e, 0x83, 0x9e, + 0xb8, 0x10, 0x64, 0x92, 0x33, 0xe8, 0x7b, 0x52, 0x76, 0x8c, 0x31, 0x1f, 0x1e, 0xc6, 0x5c, 0xe0, + 0xf2, 0x4e, 0x24, 0x10, 0x17, 0x83, 0xf0, 0x53, 0x16, 0x12, 0x32, 0xdd, 0x56, 0x87, 0x01, 0xbb, + 0x99, 0xba, 0x78, 0x12, 0xf4, 0x30, 0xd8, 0x9c, 0x47, 0xbf, 0x7c, 0x5d, 0x09, 0xbb, 0xe9, 0xd6, + 0xd3, 0x42, 0xcb, 0xd9, 0xc1, 0x28, 0x7e, 0x78, 0x59, 0xe9, 0x7b, 0x33, 0xfa, 0xef, 0x28, 0x5a, + 0xf7, 0x71, 0x40, 0x7f, 0xf9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x97, 0x21, 0x44, 0xa9, 0xff, + 0x05, 0x00, 0x00, +} diff --git 
a/executor/proto/tensorflow/core/framework/summary.proto b/executor/proto/tensorflow/core/framework/summary.proto new file mode 100644 index 0000000000..532e4fcd87 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/summary.proto @@ -0,0 +1,124 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "SummaryProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/tensor.proto"; + +// Metadata associated with a series of Summary data +message SummaryDescription { + // Hint on how plugins should process the data in this series. + // Supported values include "scalar", "histogram", "image", "audio" + string type_hint = 1; +} + +// Serialization format for histogram module in +// core/lib/histogram/histogram.h +message HistogramProto { + double min = 1; + double max = 2; + double num = 3; + double sum = 4; + double sum_squares = 5; + + // Parallel arrays encoding the bucket boundaries and the bucket values. + // bucket(i) is the count for the bucket i. The range for + // a bucket is: + // i == 0: -DBL_MAX .. bucket_limit(0) + // i != 0: bucket_limit(i-1) .. bucket_limit(i) + repeated double bucket_limit = 6 [packed = true]; + repeated double bucket = 7 [packed = true]; +}; + +// A SummaryMetadata encapsulates information on which plugins are able to make +// use of a certain summary value. +message SummaryMetadata { + message PluginData { + // The name of the plugin this data pertains to. + string plugin_name = 1; + + // The content to store for the plugin. The best practice is for this to be + // a binary serialized protocol buffer. + bytes content = 2; + } + + // Data that associates a summary with a certain plugin. + PluginData plugin_data = 1; + + // Display name for viewing in TensorBoard. 
+ string display_name = 2; + + // Longform readable description of the summary sequence. Markdown supported. + string summary_description = 3; +}; + +// A Summary is a set of named values to be displayed by the +// visualizer. +// +// Summaries are produced regularly during training, as controlled by +// the "summary_interval_secs" attribute of the training operation. +// Summaries are also produced at the end of an evaluation. +message Summary { + message Image { + // Dimensions of the image. + int32 height = 1; + int32 width = 2; + // Valid colorspace values are + // 1 - grayscale + // 2 - grayscale + alpha + // 3 - RGB + // 4 - RGBA + // 5 - DIGITAL_YUV + // 6 - BGRA + int32 colorspace = 3; + // Image data in encoded format. All image formats supported by + // image_codec::CoderUtil can be stored here. + bytes encoded_image_string = 4; + } + + message Audio { + // Sample rate of the audio in Hz. + float sample_rate = 1; + // Number of channels of audio. + int64 num_channels = 2; + // Length of the audio in frames (samples per channel). + int64 length_frames = 3; + // Encoded audio data and its associated RFC 2045 content type (e.g. + // "audio/wav"). + bytes encoded_audio_string = 4; + string content_type = 5; + } + + message Value { + // This field is deprecated and will not be set. + string node_name = 7; + + // Tag name for the data. Used by TensorBoard plugins to organize data. Tags + // are often organized by scope (which contains slashes to convey + // hierarchy). For example: foo/bar/0 + string tag = 1; + + // Contains metadata on the summary value such as which plugins may use it. + // Take note that many summary values may lack a metadata field. This is + // because the FileWriter only keeps a metadata object on the first summary + // value with a certain tag for each tag. TensorBoard then remembers which + // tags are associated with which plugins. This saves space. + SummaryMetadata metadata = 9; + + // Value associated with the tag. 
+ oneof value { + float simple_value = 2; + bytes obsolete_old_style_histogram = 3; + Image image = 4; + HistogramProto histo = 5; + Audio audio = 6; + TensorProto tensor = 8; + } + } + + // Set of values for the summary. + repeated Value value = 1; +} diff --git a/executor/proto/tensorflow/core/framework/tensor.pb.go b/executor/proto/tensorflow/core/framework/tensor.pb.go new file mode 100644 index 0000000000..2e895c4746 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/tensor.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/tensor.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Protocol buffer representing a tensor. +type TensorProto struct { + Dtype DataType `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + // Shape of the tensor. TODO(touts): sort out the 0-rank issues. + TensorShape *TensorShapeProto `protobuf:"bytes,2,opt,name=tensor_shape,json=tensorShape,proto3" json:"tensor_shape,omitempty"` + // Version number. + // + // In version 0, if the "repeated xxx" representations contain only one + // element, that element is repeated to fill the shape. This makes it easy + // to represent a constant Tensor with a single value. 
+ VersionNumber int32 `protobuf:"varint,3,opt,name=version_number,json=versionNumber,proto3" json:"version_number,omitempty"` + // Serialized raw tensor content from either Tensor::AsProtoTensorContent or + // memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation + // can be used for all tensor types. The purpose of this representation is to + // reduce serialization overhead during RPC call by avoiding serialization of + // many repeated small items. + TensorContent []byte `protobuf:"bytes,4,opt,name=tensor_content,json=tensorContent,proto3" json:"tensor_content,omitempty"` + // DT_HALF, DT_BFLOAT16. Note that since protobuf has no int16 type, we'll + // have some pointless zero padding for each value here. + HalfVal []int32 `protobuf:"varint,13,rep,packed,name=half_val,json=halfVal,proto3" json:"half_val,omitempty"` + // DT_FLOAT. + FloatVal []float32 `protobuf:"fixed32,5,rep,packed,name=float_val,json=floatVal,proto3" json:"float_val,omitempty"` + // DT_DOUBLE. + DoubleVal []float64 `protobuf:"fixed64,6,rep,packed,name=double_val,json=doubleVal,proto3" json:"double_val,omitempty"` + // DT_INT32, DT_INT16, DT_INT8, DT_UINT8. + IntVal []int32 `protobuf:"varint,7,rep,packed,name=int_val,json=intVal,proto3" json:"int_val,omitempty"` + // DT_STRING + StringVal [][]byte `protobuf:"bytes,8,rep,name=string_val,json=stringVal,proto3" json:"string_val,omitempty"` + // DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real + // and imaginary parts of i-th single precision complex. + ScomplexVal []float32 `protobuf:"fixed32,9,rep,packed,name=scomplex_val,json=scomplexVal,proto3" json:"scomplex_val,omitempty"` + // DT_INT64 + Int64Val []int64 `protobuf:"varint,10,rep,packed,name=int64_val,json=int64Val,proto3" json:"int64_val,omitempty"` + // DT_BOOL + BoolVal []bool `protobuf:"varint,11,rep,packed,name=bool_val,json=boolVal,proto3" json:"bool_val,omitempty"` + // DT_COMPLEX128. 
dcomplex_val(2*i) and dcomplex_val(2*i+1) are real + // and imaginary parts of i-th double precision complex. + DcomplexVal []float64 `protobuf:"fixed64,12,rep,packed,name=dcomplex_val,json=dcomplexVal,proto3" json:"dcomplex_val,omitempty"` + // DT_RESOURCE + ResourceHandleVal []*ResourceHandleProto `protobuf:"bytes,14,rep,name=resource_handle_val,json=resourceHandleVal,proto3" json:"resource_handle_val,omitempty"` + // DT_VARIANT + VariantVal []*VariantTensorDataProto `protobuf:"bytes,15,rep,name=variant_val,json=variantVal,proto3" json:"variant_val,omitempty"` + // DT_UINT32 + Uint32Val []uint32 `protobuf:"varint,16,rep,packed,name=uint32_val,json=uint32Val,proto3" json:"uint32_val,omitempty"` + // DT_UINT64 + Uint64Val []uint64 `protobuf:"varint,17,rep,packed,name=uint64_val,json=uint64Val,proto3" json:"uint64_val,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorProto) Reset() { *m = TensorProto{} } +func (m *TensorProto) String() string { return proto.CompactTextString(m) } +func (*TensorProto) ProtoMessage() {} +func (*TensorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_efa68180bc31e4fc, []int{0} +} + +func (m *TensorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorProto.Unmarshal(m, b) +} +func (m *TensorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorProto.Marshal(b, m, deterministic) +} +func (m *TensorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorProto.Merge(m, src) +} +func (m *TensorProto) XXX_Size() int { + return xxx_messageInfo_TensorProto.Size(m) +} +func (m *TensorProto) XXX_DiscardUnknown() { + xxx_messageInfo_TensorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorProto proto.InternalMessageInfo + +func (m *TensorProto) GetDtype() DataType { + if m != nil { + return m.Dtype + } + return DataType_DT_INVALID +} + +func (m *TensorProto) 
GetTensorShape() *TensorShapeProto { + if m != nil { + return m.TensorShape + } + return nil +} + +func (m *TensorProto) GetVersionNumber() int32 { + if m != nil { + return m.VersionNumber + } + return 0 +} + +func (m *TensorProto) GetTensorContent() []byte { + if m != nil { + return m.TensorContent + } + return nil +} + +func (m *TensorProto) GetHalfVal() []int32 { + if m != nil { + return m.HalfVal + } + return nil +} + +func (m *TensorProto) GetFloatVal() []float32 { + if m != nil { + return m.FloatVal + } + return nil +} + +func (m *TensorProto) GetDoubleVal() []float64 { + if m != nil { + return m.DoubleVal + } + return nil +} + +func (m *TensorProto) GetIntVal() []int32 { + if m != nil { + return m.IntVal + } + return nil +} + +func (m *TensorProto) GetStringVal() [][]byte { + if m != nil { + return m.StringVal + } + return nil +} + +func (m *TensorProto) GetScomplexVal() []float32 { + if m != nil { + return m.ScomplexVal + } + return nil +} + +func (m *TensorProto) GetInt64Val() []int64 { + if m != nil { + return m.Int64Val + } + return nil +} + +func (m *TensorProto) GetBoolVal() []bool { + if m != nil { + return m.BoolVal + } + return nil +} + +func (m *TensorProto) GetDcomplexVal() []float64 { + if m != nil { + return m.DcomplexVal + } + return nil +} + +func (m *TensorProto) GetResourceHandleVal() []*ResourceHandleProto { + if m != nil { + return m.ResourceHandleVal + } + return nil +} + +func (m *TensorProto) GetVariantVal() []*VariantTensorDataProto { + if m != nil { + return m.VariantVal + } + return nil +} + +func (m *TensorProto) GetUint32Val() []uint32 { + if m != nil { + return m.Uint32Val + } + return nil +} + +func (m *TensorProto) GetUint64Val() []uint64 { + if m != nil { + return m.Uint64Val + } + return nil +} + +// Protocol buffer representing the serialization format of DT_VARIANT tensors. +type VariantTensorDataProto struct { + // Name of the type of objects being serialized. 
+ TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + // Portions of the object that are not Tensors. + Metadata []byte `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Tensors contained within objects being serialized. + Tensors []*TensorProto `protobuf:"bytes,3,rep,name=tensors,proto3" json:"tensors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VariantTensorDataProto) Reset() { *m = VariantTensorDataProto{} } +func (m *VariantTensorDataProto) String() string { return proto.CompactTextString(m) } +func (*VariantTensorDataProto) ProtoMessage() {} +func (*VariantTensorDataProto) Descriptor() ([]byte, []int) { + return fileDescriptor_efa68180bc31e4fc, []int{1} +} + +func (m *VariantTensorDataProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VariantTensorDataProto.Unmarshal(m, b) +} +func (m *VariantTensorDataProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VariantTensorDataProto.Marshal(b, m, deterministic) +} +func (m *VariantTensorDataProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_VariantTensorDataProto.Merge(m, src) +} +func (m *VariantTensorDataProto) XXX_Size() int { + return xxx_messageInfo_VariantTensorDataProto.Size(m) +} +func (m *VariantTensorDataProto) XXX_DiscardUnknown() { + xxx_messageInfo_VariantTensorDataProto.DiscardUnknown(m) +} + +var xxx_messageInfo_VariantTensorDataProto proto.InternalMessageInfo + +func (m *VariantTensorDataProto) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *VariantTensorDataProto) GetMetadata() []byte { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *VariantTensorDataProto) GetTensors() []*TensorProto { + if m != nil { + return m.Tensors + } + return nil +} + +func init() { + proto.RegisterType((*TensorProto)(nil), 
"tensorflow.TensorProto") + proto.RegisterType((*VariantTensorDataProto)(nil), "tensorflow.VariantTensorDataProto") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/tensor.proto", fileDescriptor_efa68180bc31e4fc) +} + +var fileDescriptor_efa68180bc31e4fc = []byte{ + // 565 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0x5f, 0x6f, 0xd3, 0x3c, + 0x14, 0xc6, 0x95, 0x79, 0x5d, 0x93, 0x93, 0x74, 0xef, 0x96, 0x17, 0x41, 0xb4, 0x31, 0xcd, 0x4c, + 0x2a, 0xb2, 0x10, 0x6a, 0x45, 0x87, 0xb8, 0x43, 0x48, 0x1d, 0x17, 0x5c, 0x8d, 0x29, 0x4c, 0xbb, + 0xe0, 0xa6, 0x72, 0x1b, 0xb7, 0x8d, 0x48, 0xec, 0xca, 0x71, 0x3b, 0x76, 0xcf, 0x17, 0xe4, 0xdb, + 0x70, 0x89, 0x7c, 0x9c, 0xb6, 0x61, 0x0c, 0xb8, 0x6b, 0x9f, 0xf3, 0x3b, 0xcf, 0xe3, 0x3f, 0xc7, + 0x81, 0xe7, 0x46, 0xc8, 0x4a, 0xe9, 0x69, 0xa1, 0x6e, 0xfb, 0x13, 0xa5, 0x45, 0x7f, 0xaa, 0x79, + 0x29, 0x6e, 0x95, 0xfe, 0xd2, 0x77, 0x95, 0xde, 0x42, 0x2b, 0xa3, 0x62, 0xd8, 0x72, 0x47, 0xfd, + 0x3f, 0xf7, 0x68, 0x51, 0xa9, 0xa5, 0x9e, 0x88, 0xd1, 0x9c, 0xcb, 0xac, 0x10, 0xae, 0xf9, 0xe8, + 0xe5, 0xbf, 0x42, 0x46, 0xd5, 0x9c, 0x2f, 0xd6, 0x74, 0xf7, 0x2f, 0xf4, 0xdd, 0x42, 0x54, 0x0e, + 0x3b, 0xfb, 0xde, 0x82, 0xf0, 0x1a, 0xc9, 0x2b, 0x5c, 0xe1, 0x0b, 0x68, 0x65, 0xb6, 0x9e, 0x78, + 0xd4, 0x63, 0xfb, 0x83, 0x47, 0xbd, 0xad, 0x4d, 0xef, 0x3d, 0x37, 0xfc, 0xfa, 0x6e, 0x21, 0x52, + 0x87, 0xc4, 0xef, 0x20, 0x6a, 0x06, 0x27, 0x3b, 0xd4, 0x63, 0xe1, 0xe0, 0x69, 0xb3, 0xc5, 0x59, + 0x7f, 0xb2, 0x65, 0xf4, 0x4f, 0x43, 0xb3, 0x55, 0xe2, 0x2e, 0xec, 0xaf, 0x84, 0xae, 0x72, 0x25, + 0x47, 0x72, 0x59, 0x8e, 0x85, 0x4e, 0x08, 0xf5, 0x58, 0x2b, 0xed, 0xd4, 0xea, 0x25, 0x8a, 0x16, + 0xab, 0x73, 0x26, 0x4a, 0x1a, 0x21, 0x4d, 0xb2, 0x4b, 0x3d, 0x16, 0xa5, 0x1d, 0xa7, 0x5e, 0x38, + 0x31, 0x3e, 0x01, 0x7f, 0xce, 0x8b, 0xe9, 0x68, 0xc5, 0x8b, 0xa4, 0x43, 0x09, 0x6b, 0x0d, 0x77, + 0x0e, 0xbc, 0xb4, 0x6d, 0xb5, 0x1b, 0x5e, 0xc4, 0xa7, 0x10, 0x4c, 0x0b, 0xc5, 0x0d, 0xd6, 0x5b, + 
0x94, 0xb0, 0x1d, 0xac, 0xfb, 0x28, 0x5a, 0xe0, 0x19, 0x40, 0xa6, 0x96, 0xe3, 0x42, 0x20, 0xb1, + 0x47, 0x09, 0xf3, 0x90, 0x08, 0x9c, 0x6a, 0x91, 0x63, 0x68, 0xe7, 0xd2, 0x39, 0xb4, 0x37, 0x09, + 0x7b, 0xb9, 0xc4, 0xfe, 0x13, 0x80, 0xca, 0xe8, 0x5c, 0xce, 0xb0, 0xee, 0x53, 0xc2, 0xa2, 0x34, + 0x70, 0x8a, 0x2d, 0x77, 0x21, 0xaa, 0x26, 0xaa, 0x5c, 0x14, 0xe2, 0x2b, 0x02, 0xc1, 0x66, 0x09, + 0xe1, 0x5a, 0xaf, 0x97, 0x99, 0x4b, 0xf3, 0xe6, 0x35, 0x32, 0x40, 0x09, 0x23, 0x6e, 0x99, 0x28, + 0xba, 0x18, 0x7f, 0xac, 0x54, 0x81, 0xf5, 0x90, 0x12, 0xe6, 0xbb, 0x6d, 0x5a, 0xad, 0x8e, 0xc9, + 0x9a, 0x31, 0xd1, 0x66, 0x1f, 0x61, 0xd6, 0x88, 0xf9, 0x08, 0xff, 0xdf, 0x9b, 0x32, 0xa4, 0xf7, + 0x29, 0x61, 0xe1, 0xe0, 0xb4, 0x79, 0x85, 0x69, 0x8d, 0x7d, 0x40, 0xca, 0xdd, 0xe2, 0xa1, 0xfe, + 0x45, 0xb4, 0x86, 0x17, 0x10, 0xae, 0xb8, 0xce, 0x79, 0x7d, 0x3c, 0xff, 0xa1, 0xd1, 0x59, 0xd3, + 0xe8, 0xc6, 0x95, 0xdd, 0x48, 0xd8, 0x59, 0x72, 0x5e, 0x50, 0xb7, 0xd5, 0x57, 0xb0, 0xcc, 0xa5, + 0x39, 0x1f, 0xa0, 0xc7, 0x01, 0x25, 0xac, 0xe3, 0xae, 0xc0, 0xa9, 0x0d, 0xa4, 0x3e, 0xa0, 0x43, + 0x4a, 0xd8, 0xee, 0x16, 0xc1, 0x13, 0x3a, 0xfb, 0xe6, 0xc1, 0xe3, 0x87, 0xc3, 0xe2, 0x63, 0x08, + 0xec, 0xe8, 0x8e, 0x24, 0x2f, 0xdd, 0x88, 0x07, 0xa9, 0x6f, 0x85, 0x4b, 0x5e, 0x8a, 0xf8, 0x08, + 0xfc, 0x52, 0x18, 0x9e, 0x71, 0xc3, 0x71, 0x96, 0xa3, 0x74, 0xf3, 0x3f, 0x7e, 0x05, 0x6d, 0xb7, + 0x95, 0x2a, 0x21, 0xb8, 0xb5, 0x27, 0xbf, 0x8f, 0xb9, 0xdb, 0xcf, 0x9a, 0x1b, 0x16, 0x90, 0x28, + 0x3d, 0x6b, 0x62, 0x9b, 0x27, 0x38, 0x8c, 0x1a, 0x1d, 0xd5, 0x95, 0xf7, 0xf9, 0xed, 0x2c, 0x37, + 0xf3, 0xe5, 0xb8, 0x37, 0x51, 0x65, 0xf3, 0xbb, 0xf0, 0xf0, 0xcf, 0x99, 0xba, 0xf7, 0xa2, 0x7f, + 0x78, 0xde, 0x78, 0x0f, 0xdf, 0xf3, 0xf9, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe2, 0x2b, 0x54, + 0xd7, 0x8b, 0x04, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/tensor.proto b/executor/proto/tensorflow/core/framework/tensor.proto new file mode 100644 index 0000000000..55921af1d0 --- /dev/null +++ 
b/executor/proto/tensorflow/core/framework/tensor.proto @@ -0,0 +1,94 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "TensorProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/resource_handle.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +// Protocol buffer representing a tensor. +message TensorProto { + DataType dtype = 1; + + // Shape of the tensor. TODO(touts): sort out the 0-rank issues. + TensorShapeProto tensor_shape = 2; + + // Only one of the representations below is set, one of "tensor_contents" and + // the "xxx_val" attributes. We are not using oneof because as oneofs cannot + // contain repeated fields it would require another extra set of messages. + + // Version number. + // + // In version 0, if the "repeated xxx" representations contain only one + // element, that element is repeated to fill the shape. This makes it easy + // to represent a constant Tensor with a single value. + int32 version_number = 3; + + // Serialized raw tensor content from either Tensor::AsProtoTensorContent or + // memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation + // can be used for all tensor types. The purpose of this representation is to + // reduce serialization overhead during RPC call by avoiding serialization of + // many repeated small items. + bytes tensor_content = 4; + + // Type specific representations that make it easy to create tensor protos in + // all languages. Only the representation corresponding to "dtype" can + // be set. The values hold the flattened representation of the tensor in + // row major order. + + // DT_HALF, DT_BFLOAT16. 
Note that since protobuf has no int16 type, we'll + // have some pointless zero padding for each value here. + repeated int32 half_val = 13 [packed = true]; + + // DT_FLOAT. + repeated float float_val = 5 [packed = true]; + + // DT_DOUBLE. + repeated double double_val = 6 [packed = true]; + + // DT_INT32, DT_INT16, DT_INT8, DT_UINT8. + repeated int32 int_val = 7 [packed = true]; + + // DT_STRING + repeated bytes string_val = 8; + + // DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real + // and imaginary parts of i-th single precision complex. + repeated float scomplex_val = 9 [packed = true]; + + // DT_INT64 + repeated int64 int64_val = 10 [packed = true]; + + // DT_BOOL + repeated bool bool_val = 11 [packed = true]; + + // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real + // and imaginary parts of i-th double precision complex. + repeated double dcomplex_val = 12 [packed = true]; + + // DT_RESOURCE + repeated ResourceHandleProto resource_handle_val = 14; + + // DT_VARIANT + repeated VariantTensorDataProto variant_val = 15; + + // DT_UINT32 + repeated uint32 uint32_val = 16 [packed = true]; + + // DT_UINT64 + repeated uint64 uint64_val = 17 [packed = true]; +}; + +// Protocol buffer representing the serialization format of DT_VARIANT tensors. +message VariantTensorDataProto { + // Name of the type of objects being serialized. + string type_name = 1; + // Portions of the object that are not Tensors. + bytes metadata = 2; + // Tensors contained within objects being serialized. + repeated TensorProto tensors = 3; +} diff --git a/executor/proto/tensorflow/core/framework/tensor_description.pb.go b/executor/proto/tensorflow/core/framework/tensor_description.pb.go new file mode 100644 index 0000000000..1a1e9ec907 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/tensor_description.pb.go @@ -0,0 +1,107 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow/core/framework/tensor_description.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type TensorDescription struct { + // Data type of tensor elements + Dtype DataType `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + // Shape of the tensor. + Shape *TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"` + // Information about the size and allocator used for the data + AllocationDescription *AllocationDescription `protobuf:"bytes,4,opt,name=allocation_description,json=allocationDescription,proto3" json:"allocation_description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorDescription) Reset() { *m = TensorDescription{} } +func (m *TensorDescription) String() string { return proto.CompactTextString(m) } +func (*TensorDescription) ProtoMessage() {} +func (*TensorDescription) Descriptor() ([]byte, []int) { + return fileDescriptor_aa203ffb9e427669, []int{0} +} + +func (m *TensorDescription) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorDescription.Unmarshal(m, b) +} +func (m *TensorDescription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorDescription.Marshal(b, m, deterministic) +} +func (m *TensorDescription) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorDescription.Merge(m, src) +} +func (m 
*TensorDescription) XXX_Size() int { + return xxx_messageInfo_TensorDescription.Size(m) +} +func (m *TensorDescription) XXX_DiscardUnknown() { + xxx_messageInfo_TensorDescription.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorDescription proto.InternalMessageInfo + +func (m *TensorDescription) GetDtype() DataType { + if m != nil { + return m.Dtype + } + return DataType_DT_INVALID +} + +func (m *TensorDescription) GetShape() *TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +func (m *TensorDescription) GetAllocationDescription() *AllocationDescription { + if m != nil { + return m.AllocationDescription + } + return nil +} + +func init() { + proto.RegisterType((*TensorDescription)(nil), "tensorflow.TensorDescription") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/tensor_description.proto", fileDescriptor_aa203ffb9e427669) +} + +var fileDescriptor_aa203ffb9e427669 = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x2a, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0xd7, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4f, 0x2b, 0x4a, 0xcc, + 0x4d, 0x2d, 0xcf, 0x2f, 0xca, 0xd6, 0x87, 0xc8, 0xc4, 0xa7, 0xa4, 0x16, 0x27, 0x17, 0x65, 0x16, + 0x94, 0x64, 0xe6, 0xe7, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x71, 0x21, 0xf4, 0x48, 0xa9, + 0xe2, 0xd1, 0x5f, 0x59, 0x90, 0x5a, 0x0c, 0xd1, 0x22, 0xa5, 0x43, 0xd0, 0x9a, 0xe2, 0x8c, 0xc4, + 0x82, 0x54, 0xa8, 0x6a, 0x33, 0xdc, 0xaa, 0x13, 0x73, 0x72, 0xf2, 0x93, 0x13, 0x41, 0x8e, 0xc1, + 0x74, 0x98, 0xd2, 0x59, 0x46, 0x2e, 0xc1, 0x10, 0xb0, 0x56, 0x17, 0x84, 0x9c, 0x90, 0x16, 0x17, + 0x6b, 0x0a, 0xc8, 0x2d, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x7c, 0x46, 0x22, 0x7a, 0x08, 0xd3, 0xf5, + 0x5c, 0x12, 0x4b, 0x12, 0x43, 0x2a, 0x0b, 0x52, 0x83, 0x20, 0x4a, 0x84, 0x8c, 0xb8, 0x58, 0xc1, + 0x0e, 0x91, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x36, 0x92, 0x41, 0x56, 0x0b, 0x31, 0x39, 0x18, 0x24, + 0x1d, 0x00, 0xb2, 0x2e, 0x08, 0xa2, 
0x54, 0x28, 0x82, 0x4b, 0x0c, 0xbb, 0xab, 0x24, 0x58, 0xc0, + 0x86, 0x28, 0x22, 0x1b, 0xe2, 0x08, 0x57, 0x89, 0xe4, 0xc4, 0x20, 0xd1, 0x44, 0x6c, 0xc2, 0x4e, + 0xe5, 0x5c, 0x12, 0xf9, 0x45, 0xe9, 0xc8, 0xda, 0xe1, 0x01, 0xe1, 0x24, 0x8e, 0xe1, 0x51, 0xb0, + 0xa3, 0x8a, 0x03, 0x18, 0xa3, 0x6c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, + 0xf5, 0x91, 0x42, 0x12, 0x3b, 0x33, 0x3d, 0x1f, 0x2d, 0x88, 0x7f, 0x30, 0x32, 0x26, 0xb1, 0x81, + 0xc3, 0xd3, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xf6, 0xed, 0x24, 0x26, 0x1e, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/tensor_description.proto b/executor/proto/tensorflow/core/framework/tensor_description.proto new file mode 100644 index 0000000000..4c23c7e620 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/tensor_description.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "TensorDescriptionProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; +import "tensorflow/core/framework/types.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/allocation_description.proto"; + +message TensorDescription { + // Data type of tensor elements + DataType dtype = 1; + + // Shape of the tensor. + TensorShapeProto shape = 2; + + // Information about the size and allocator used for the data + AllocationDescription allocation_description = 4; +}; diff --git a/executor/proto/tensorflow/core/framework/tensor_shape.pb.go b/executor/proto/tensorflow/core/framework/tensor_shape.pb.go new file mode 100644 index 0000000000..f001f01279 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/tensor_shape.pb.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow/core/framework/tensor_shape.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Dimensions of a tensor. +type TensorShapeProto struct { + // Dimensions of the tensor, such as {"input", 30}, {"output", 40} + // for a 30 x 40 2D tensor. If an entry has size -1, this + // corresponds to a dimension of unknown size. The names are + // optional. + // + // The order of entries in "dim" matters: It indicates the layout of the + // values in the tensor in-memory representation. + // + // The first entry in "dim" is the outermost dimension used to layout the + // values, the last entry is the innermost dimension. This matches the + // in-memory layout of RowMajor Eigen tensors. + // + // If "dim.size()" > 0, "unknown_rank" must be false. + Dim []*TensorShapeProto_Dim `protobuf:"bytes,2,rep,name=dim,proto3" json:"dim,omitempty"` + // If true, the number of dimensions in the shape is unknown. + // + // If true, "dim.size()" must be 0. 
+ UnknownRank bool `protobuf:"varint,3,opt,name=unknown_rank,json=unknownRank,proto3" json:"unknown_rank,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorShapeProto) Reset() { *m = TensorShapeProto{} } +func (m *TensorShapeProto) String() string { return proto.CompactTextString(m) } +func (*TensorShapeProto) ProtoMessage() {} +func (*TensorShapeProto) Descriptor() ([]byte, []int) { + return fileDescriptor_cd43873e75c1f7ac, []int{0} +} + +func (m *TensorShapeProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorShapeProto.Unmarshal(m, b) +} +func (m *TensorShapeProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorShapeProto.Marshal(b, m, deterministic) +} +func (m *TensorShapeProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorShapeProto.Merge(m, src) +} +func (m *TensorShapeProto) XXX_Size() int { + return xxx_messageInfo_TensorShapeProto.Size(m) +} +func (m *TensorShapeProto) XXX_DiscardUnknown() { + xxx_messageInfo_TensorShapeProto.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorShapeProto proto.InternalMessageInfo + +func (m *TensorShapeProto) GetDim() []*TensorShapeProto_Dim { + if m != nil { + return m.Dim + } + return nil +} + +func (m *TensorShapeProto) GetUnknownRank() bool { + if m != nil { + return m.UnknownRank + } + return false +} + +// One dimension of the tensor. +type TensorShapeProto_Dim struct { + // Size of the tensor in that dimension. + // This value must be >= -1, but values of -1 are reserved for "unknown" + // shapes (values of -1 mean "unknown" dimension). Certain wrappers + // that work with TensorShapeProto may fail at runtime when deserializing + // a TensorShapeProto containing a dim value of -1. + Size int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` + // Optional name of the tensor dimension. 
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorShapeProto_Dim) Reset() { *m = TensorShapeProto_Dim{} } +func (m *TensorShapeProto_Dim) String() string { return proto.CompactTextString(m) } +func (*TensorShapeProto_Dim) ProtoMessage() {} +func (*TensorShapeProto_Dim) Descriptor() ([]byte, []int) { + return fileDescriptor_cd43873e75c1f7ac, []int{0, 0} +} + +func (m *TensorShapeProto_Dim) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorShapeProto_Dim.Unmarshal(m, b) +} +func (m *TensorShapeProto_Dim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorShapeProto_Dim.Marshal(b, m, deterministic) +} +func (m *TensorShapeProto_Dim) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorShapeProto_Dim.Merge(m, src) +} +func (m *TensorShapeProto_Dim) XXX_Size() int { + return xxx_messageInfo_TensorShapeProto_Dim.Size(m) +} +func (m *TensorShapeProto_Dim) XXX_DiscardUnknown() { + xxx_messageInfo_TensorShapeProto_Dim.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorShapeProto_Dim proto.InternalMessageInfo + +func (m *TensorShapeProto_Dim) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + +func (m *TensorShapeProto_Dim) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*TensorShapeProto)(nil), "tensorflow.TensorShapeProto") + proto.RegisterType((*TensorShapeProto_Dim)(nil), "tensorflow.TensorShapeProto.Dim") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/tensor_shape.proto", fileDescriptor_cd43873e75c1f7ac) +} + +var fileDescriptor_cd43873e75c1f7ac = []byte{ + // 229 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x29, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0xd7, 0x4f, 0xce, 0x2f, 
0x4a, 0xd5, 0x4f, 0x2b, 0x4a, 0xcc, + 0x4d, 0x2d, 0xcf, 0x2f, 0xca, 0xd6, 0x87, 0xc8, 0xc4, 0x17, 0x67, 0x24, 0x16, 0xa4, 0xea, 0x15, + 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x71, 0x21, 0x54, 0x2b, 0xcd, 0x60, 0xe4, 0x12, 0x08, 0x01, 0x73, + 0x83, 0x41, 0x2a, 0x02, 0xc0, 0x0a, 0x8c, 0xb8, 0x98, 0x53, 0x32, 0x73, 0x25, 0x98, 0x14, 0x98, + 0x35, 0xb8, 0x8d, 0x14, 0xf4, 0x10, 0xca, 0xf5, 0xd0, 0x95, 0xea, 0xb9, 0x64, 0xe6, 0x06, 0x81, + 0x14, 0x0b, 0x29, 0x72, 0xf1, 0x94, 0xe6, 0x65, 0xe7, 0xe5, 0x97, 0xe7, 0xc5, 0x17, 0x25, 0xe6, + 0x65, 0x4b, 0x30, 0x2b, 0x30, 0x6a, 0x70, 0x04, 0x71, 0x43, 0xc5, 0x82, 0x12, 0xf3, 0xb2, 0xa5, + 0x74, 0xb9, 0x98, 0x5d, 0x32, 0x73, 0x85, 0x84, 0xb8, 0x58, 0x8a, 0x33, 0xab, 0x52, 0x25, 0x18, + 0x15, 0x18, 0x35, 0x98, 0x83, 0xc0, 0x6c, 0x90, 0x58, 0x5e, 0x62, 0x6e, 0xaa, 0x04, 0x93, 0x02, + 0xa3, 0x06, 0x67, 0x10, 0x98, 0xed, 0x54, 0xc8, 0x25, 0x91, 0x5f, 0x94, 0x8e, 0x6c, 0x3b, 0xdc, + 0x57, 0x4e, 0x82, 0xe8, 0x0e, 0x29, 0x0e, 0x60, 0x8c, 0xb2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, + 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x47, 0x0a, 0x10, 0xec, 0xcc, 0xf4, 0x7c, 0xb4, 0x90, 0xfa, 0xc1, + 0xc8, 0x98, 0xc4, 0x06, 0x0e, 0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5a, 0x6a, 0x6f, + 0x42, 0x50, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/tensor_shape.proto b/executor/proto/tensorflow/core/framework/tensor_shape.proto new file mode 100644 index 0000000000..286156a012 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/tensor_shape.proto @@ -0,0 +1,46 @@ +// Protocol buffer representing the shape of tensors. + +syntax = "proto3"; +option cc_enable_arenas = true; +option java_outer_classname = "TensorShapeProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; + +package tensorflow; + +// Dimensions of a tensor. +message TensorShapeProto { + // One dimension of the tensor. 
+ message Dim { + // Size of the tensor in that dimension. + // This value must be >= -1, but values of -1 are reserved for "unknown" + // shapes (values of -1 mean "unknown" dimension). Certain wrappers + // that work with TensorShapeProto may fail at runtime when deserializing + // a TensorShapeProto containing a dim value of -1. + int64 size = 1; + + // Optional name of the tensor dimension. + string name = 2; + }; + + // Dimensions of the tensor, such as {"input", 30}, {"output", 40} + // for a 30 x 40 2D tensor. If an entry has size -1, this + // corresponds to a dimension of unknown size. The names are + // optional. + // + // The order of entries in "dim" matters: It indicates the layout of the + // values in the tensor in-memory representation. + // + // The first entry in "dim" is the outermost dimension used to layout the + // values, the last entry is the innermost dimension. This matches the + // in-memory layout of RowMajor Eigen tensors. + // + // If "dim.size()" > 0, "unknown_rank" must be false. + repeated Dim dim = 2; + + // If true, the number of dimensions in the shape is unknown. + // + // If true, "dim.size()" must be 0. + bool unknown_rank = 3; +}; diff --git a/executor/proto/tensorflow/core/framework/tensor_slice.pb.go b/executor/proto/tensorflow/core/framework/tensor_slice.pb.go new file mode 100644 index 0000000000..f375b92b19 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/tensor_slice.pb.go @@ -0,0 +1,174 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/tensor_slice.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Can only be interpreted if you know the corresponding TensorShape. +type TensorSliceProto struct { + // Extent of the slice in all tensor dimensions. + // + // Must have one entry for each of the dimension of the tensor that this + // slice belongs to. The order of sizes is the same as the order of + // dimensions in the TensorShape. + Extent []*TensorSliceProto_Extent `protobuf:"bytes,1,rep,name=extent,proto3" json:"extent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorSliceProto) Reset() { *m = TensorSliceProto{} } +func (m *TensorSliceProto) String() string { return proto.CompactTextString(m) } +func (*TensorSliceProto) ProtoMessage() {} +func (*TensorSliceProto) Descriptor() ([]byte, []int) { + return fileDescriptor_efadfca37d8372d8, []int{0} +} + +func (m *TensorSliceProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorSliceProto.Unmarshal(m, b) +} +func (m *TensorSliceProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorSliceProto.Marshal(b, m, deterministic) +} +func (m *TensorSliceProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorSliceProto.Merge(m, src) +} +func (m *TensorSliceProto) XXX_Size() int { + return xxx_messageInfo_TensorSliceProto.Size(m) +} +func (m *TensorSliceProto) XXX_DiscardUnknown() { + xxx_messageInfo_TensorSliceProto.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorSliceProto proto.InternalMessageInfo + +func (m *TensorSliceProto) GetExtent() []*TensorSliceProto_Extent { + if m != nil { + return m.Extent + } + return nil +} + +// Extent of the slice in one dimension. +type TensorSliceProto_Extent struct { + // Start index of the slice, starting at 0. 
+ Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // Length of the slice: if the length is missing or -1 we will + // interpret this as "everything in this dimension". We use + // "oneof" to preserve information about whether the length is + // present without changing the serialization format from the + // prior proto2 version of this proto. + // + // Types that are valid to be assigned to HasLength: + // *TensorSliceProto_Extent_Length + HasLength isTensorSliceProto_Extent_HasLength `protobuf_oneof:"has_length"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorSliceProto_Extent) Reset() { *m = TensorSliceProto_Extent{} } +func (m *TensorSliceProto_Extent) String() string { return proto.CompactTextString(m) } +func (*TensorSliceProto_Extent) ProtoMessage() {} +func (*TensorSliceProto_Extent) Descriptor() ([]byte, []int) { + return fileDescriptor_efadfca37d8372d8, []int{0, 0} +} + +func (m *TensorSliceProto_Extent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorSliceProto_Extent.Unmarshal(m, b) +} +func (m *TensorSliceProto_Extent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorSliceProto_Extent.Marshal(b, m, deterministic) +} +func (m *TensorSliceProto_Extent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorSliceProto_Extent.Merge(m, src) +} +func (m *TensorSliceProto_Extent) XXX_Size() int { + return xxx_messageInfo_TensorSliceProto_Extent.Size(m) +} +func (m *TensorSliceProto_Extent) XXX_DiscardUnknown() { + xxx_messageInfo_TensorSliceProto_Extent.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorSliceProto_Extent proto.InternalMessageInfo + +func (m *TensorSliceProto_Extent) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +type isTensorSliceProto_Extent_HasLength interface { + isTensorSliceProto_Extent_HasLength() +} + +type TensorSliceProto_Extent_Length 
struct { + Length int64 `protobuf:"varint,2,opt,name=length,proto3,oneof"` +} + +func (*TensorSliceProto_Extent_Length) isTensorSliceProto_Extent_HasLength() {} + +func (m *TensorSliceProto_Extent) GetHasLength() isTensorSliceProto_Extent_HasLength { + if m != nil { + return m.HasLength + } + return nil +} + +func (m *TensorSliceProto_Extent) GetLength() int64 { + if x, ok := m.GetHasLength().(*TensorSliceProto_Extent_Length); ok { + return x.Length + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*TensorSliceProto_Extent) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TensorSliceProto_Extent_Length)(nil), + } +} + +func init() { + proto.RegisterType((*TensorSliceProto)(nil), "tensorflow.TensorSliceProto") + proto.RegisterType((*TensorSliceProto_Extent)(nil), "tensorflow.TensorSliceProto.Extent") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/tensor_slice.proto", fileDescriptor_efadfca37d8372d8) +} + +var fileDescriptor_efadfca37d8372d8 = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x29, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0xd7, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4f, 0x2b, 0x4a, 0xcc, + 0x4d, 0x2d, 0xcf, 0x2f, 0xca, 0xd6, 0x87, 0xc8, 0xc4, 0x17, 0xe7, 0x64, 0x26, 0xa7, 0xea, 0x15, + 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x71, 0x21, 0x54, 0x2b, 0x4d, 0x67, 0xe4, 0x12, 0x08, 0x01, 0x73, + 0x83, 0x41, 0x2a, 0x02, 0xc0, 0x0a, 0xac, 0xb9, 0xd8, 0x52, 0x2b, 0x4a, 0x52, 0xf3, 0x4a, 0x24, + 0x18, 0x15, 0x98, 0x35, 0xb8, 0x8d, 0x94, 0xf5, 0x10, 0x3a, 0xf4, 0xd0, 0x55, 0xeb, 0xb9, 0x82, + 0x95, 0x06, 0x41, 0xb5, 0x48, 0xb9, 0x71, 0xb1, 0x41, 0x44, 0x84, 0x44, 0xb8, 0x58, 0x8b, 0x4b, + 0x12, 0x8b, 0x40, 0xa6, 0x30, 0x6a, 0x30, 0x07, 0x41, 0x38, 0x42, 0x12, 0x5c, 0x6c, 0x39, 0xa9, + 0x79, 0xe9, 0x25, 0x19, 0x12, 0x4c, 0x20, 0x61, 0x0f, 0x86, 0x20, 0x28, 0xdf, 0x89, 0x87, 0x8b, + 0x2b, 0x23, 0xb1, 
0x38, 0x1e, 0xca, 0x2b, 0xe4, 0x92, 0xc8, 0x2f, 0x4a, 0x47, 0xb6, 0x19, 0xee, + 0x29, 0x27, 0x41, 0x74, 0x47, 0x14, 0x07, 0x30, 0x46, 0xd9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, + 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x23, 0x85, 0x07, 0x76, 0x66, 0x7a, 0x3e, 0x5a, 0x40, 0xfd, 0x60, + 0x64, 0x4c, 0x62, 0x03, 0x87, 0x8f, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x83, 0xb6, 0x4e, + 0x4f, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/tensor_slice.proto b/executor/proto/tensorflow/core/framework/tensor_slice.proto new file mode 100644 index 0000000000..a5c366ed60 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/tensor_slice.proto @@ -0,0 +1,38 @@ +// Protocol buffer representing slices of a tensor + +syntax = "proto3"; +option cc_enable_arenas = true; +option java_outer_classname = "TensorSliceProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; + +package tensorflow; + +// Can only be interpreted if you know the corresponding TensorShape. +message TensorSliceProto { + // Extent of the slice in one dimension. + message Extent { + // Either both or no attributes must be set. When no attribute is set + // means: All data in that dimension. + + // Start index of the slice, starting at 0. + int64 start = 1; + + // Length of the slice: if the length is missing or -1 we will + // interpret this as "everything in this dimension". We use + // "oneof" to preserve information about whether the length is + // present without changing the serialization format from the + // prior proto2 version of this proto. + oneof has_length { + int64 length = 2; + } + }; + + // Extent of the slice in all tensor dimensions. + // + // Must have one entry for each of the dimension of the tensor that this + // slice belongs to. The order of sizes is the same as the order of + // dimensions in the TensorShape. 
+ repeated Extent extent = 1; +}; diff --git a/executor/proto/tensorflow/core/framework/types.pb.go b/executor/proto/tensorflow/core/framework/types.pb.go new file mode 100644 index 0000000000..9f23d62e2f --- /dev/null +++ b/executor/proto/tensorflow/core/framework/types.pb.go @@ -0,0 +1,232 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/types.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// (== suppress_warning documentation-presence ==) +// LINT.IfChange +type DataType int32 + +const ( + // Not a legal value for DataType. Used to indicate a DataType field + // has not been set. + DataType_DT_INVALID DataType = 0 + // Data types that all computation devices are expected to be + // capable to support. 
+ DataType_DT_FLOAT DataType = 1 + DataType_DT_DOUBLE DataType = 2 + DataType_DT_INT32 DataType = 3 + DataType_DT_UINT8 DataType = 4 + DataType_DT_INT16 DataType = 5 + DataType_DT_INT8 DataType = 6 + DataType_DT_STRING DataType = 7 + DataType_DT_COMPLEX64 DataType = 8 + DataType_DT_INT64 DataType = 9 + DataType_DT_BOOL DataType = 10 + DataType_DT_QINT8 DataType = 11 + DataType_DT_QUINT8 DataType = 12 + DataType_DT_QINT32 DataType = 13 + DataType_DT_BFLOAT16 DataType = 14 + DataType_DT_QINT16 DataType = 15 + DataType_DT_QUINT16 DataType = 16 + DataType_DT_UINT16 DataType = 17 + DataType_DT_COMPLEX128 DataType = 18 + DataType_DT_HALF DataType = 19 + DataType_DT_RESOURCE DataType = 20 + DataType_DT_VARIANT DataType = 21 + DataType_DT_UINT32 DataType = 22 + DataType_DT_UINT64 DataType = 23 + // Do not use! These are only for parameters. Every enum above + // should have a corresponding value below (verified by types_test). + DataType_DT_FLOAT_REF DataType = 101 + DataType_DT_DOUBLE_REF DataType = 102 + DataType_DT_INT32_REF DataType = 103 + DataType_DT_UINT8_REF DataType = 104 + DataType_DT_INT16_REF DataType = 105 + DataType_DT_INT8_REF DataType = 106 + DataType_DT_STRING_REF DataType = 107 + DataType_DT_COMPLEX64_REF DataType = 108 + DataType_DT_INT64_REF DataType = 109 + DataType_DT_BOOL_REF DataType = 110 + DataType_DT_QINT8_REF DataType = 111 + DataType_DT_QUINT8_REF DataType = 112 + DataType_DT_QINT32_REF DataType = 113 + DataType_DT_BFLOAT16_REF DataType = 114 + DataType_DT_QINT16_REF DataType = 115 + DataType_DT_QUINT16_REF DataType = 116 + DataType_DT_UINT16_REF DataType = 117 + DataType_DT_COMPLEX128_REF DataType = 118 + DataType_DT_HALF_REF DataType = 119 + DataType_DT_RESOURCE_REF DataType = 120 + DataType_DT_VARIANT_REF DataType = 121 + DataType_DT_UINT32_REF DataType = 122 + DataType_DT_UINT64_REF DataType = 123 +) + +var DataType_name = map[int32]string{ + 0: "DT_INVALID", + 1: "DT_FLOAT", + 2: "DT_DOUBLE", + 3: "DT_INT32", + 4: "DT_UINT8", + 5: 
"DT_INT16", + 6: "DT_INT8", + 7: "DT_STRING", + 8: "DT_COMPLEX64", + 9: "DT_INT64", + 10: "DT_BOOL", + 11: "DT_QINT8", + 12: "DT_QUINT8", + 13: "DT_QINT32", + 14: "DT_BFLOAT16", + 15: "DT_QINT16", + 16: "DT_QUINT16", + 17: "DT_UINT16", + 18: "DT_COMPLEX128", + 19: "DT_HALF", + 20: "DT_RESOURCE", + 21: "DT_VARIANT", + 22: "DT_UINT32", + 23: "DT_UINT64", + 101: "DT_FLOAT_REF", + 102: "DT_DOUBLE_REF", + 103: "DT_INT32_REF", + 104: "DT_UINT8_REF", + 105: "DT_INT16_REF", + 106: "DT_INT8_REF", + 107: "DT_STRING_REF", + 108: "DT_COMPLEX64_REF", + 109: "DT_INT64_REF", + 110: "DT_BOOL_REF", + 111: "DT_QINT8_REF", + 112: "DT_QUINT8_REF", + 113: "DT_QINT32_REF", + 114: "DT_BFLOAT16_REF", + 115: "DT_QINT16_REF", + 116: "DT_QUINT16_REF", + 117: "DT_UINT16_REF", + 118: "DT_COMPLEX128_REF", + 119: "DT_HALF_REF", + 120: "DT_RESOURCE_REF", + 121: "DT_VARIANT_REF", + 122: "DT_UINT32_REF", + 123: "DT_UINT64_REF", +} + +var DataType_value = map[string]int32{ + "DT_INVALID": 0, + "DT_FLOAT": 1, + "DT_DOUBLE": 2, + "DT_INT32": 3, + "DT_UINT8": 4, + "DT_INT16": 5, + "DT_INT8": 6, + "DT_STRING": 7, + "DT_COMPLEX64": 8, + "DT_INT64": 9, + "DT_BOOL": 10, + "DT_QINT8": 11, + "DT_QUINT8": 12, + "DT_QINT32": 13, + "DT_BFLOAT16": 14, + "DT_QINT16": 15, + "DT_QUINT16": 16, + "DT_UINT16": 17, + "DT_COMPLEX128": 18, + "DT_HALF": 19, + "DT_RESOURCE": 20, + "DT_VARIANT": 21, + "DT_UINT32": 22, + "DT_UINT64": 23, + "DT_FLOAT_REF": 101, + "DT_DOUBLE_REF": 102, + "DT_INT32_REF": 103, + "DT_UINT8_REF": 104, + "DT_INT16_REF": 105, + "DT_INT8_REF": 106, + "DT_STRING_REF": 107, + "DT_COMPLEX64_REF": 108, + "DT_INT64_REF": 109, + "DT_BOOL_REF": 110, + "DT_QINT8_REF": 111, + "DT_QUINT8_REF": 112, + "DT_QINT32_REF": 113, + "DT_BFLOAT16_REF": 114, + "DT_QINT16_REF": 115, + "DT_QUINT16_REF": 116, + "DT_UINT16_REF": 117, + "DT_COMPLEX128_REF": 118, + "DT_HALF_REF": 119, + "DT_RESOURCE_REF": 120, + "DT_VARIANT_REF": 121, + "DT_UINT32_REF": 122, + "DT_UINT64_REF": 123, +} + +func (x DataType) String() string { + 
return proto.EnumName(DataType_name, int32(x)) +} + +func (DataType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_472a3bf6d9d70fb7, []int{0} +} + +func init() { + proto.RegisterEnum("tensorflow.DataType", DataType_name, DataType_value) +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/types.proto", fileDescriptor_472a3bf6d9d70fb7) +} + +var fileDescriptor_472a3bf6d9d70fb7 = []byte{ + // 483 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0xcd, 0x6e, 0xda, 0x40, + 0x10, 0xc7, 0x4b, 0x3f, 0x08, 0x59, 0xbe, 0x86, 0x4d, 0xd2, 0xf6, 0x05, 0x7a, 0xe9, 0x01, 0x64, + 0x40, 0x16, 0x97, 0x1e, 0x4c, 0x6c, 0x5a, 0x4b, 0xae, 0x17, 0x9c, 0x25, 0xaa, 0x7a, 0xb1, 0x48, + 0x64, 0x48, 0x4a, 0xc8, 0x52, 0xdb, 0x94, 0xd2, 0xbe, 0x51, 0x9f, 0xb0, 0xc7, 0x8a, 0xf1, 0xc0, + 0x6e, 0xab, 0xde, 0x3c, 0xbf, 0x99, 0xfd, 0xcf, 0xdf, 0x33, 0x1a, 0xf6, 0x26, 0x4f, 0x1e, 0x33, + 0x95, 0xce, 0x1f, 0xd4, 0xb6, 0x73, 0xab, 0xd2, 0xa4, 0x33, 0x4f, 0x67, 0xab, 0x64, 0xab, 0xd2, + 0x65, 0x27, 0xdf, 0xad, 0x93, 0xac, 0xbd, 0x4e, 0x55, 0xae, 0x38, 0xd3, 0x65, 0x6f, 0x7f, 0x95, + 0x59, 0xc5, 0x9d, 0xe5, 0x33, 0xb9, 0x5b, 0x27, 0xbc, 0xc1, 0x98, 0x2b, 0x63, 0x3f, 0xbc, 0x76, + 0x02, 0xdf, 0x85, 0x27, 0xbc, 0xc6, 0x2a, 0xae, 0x8c, 0x47, 0x81, 0x70, 0x24, 0x94, 0x78, 0x9d, + 0x9d, 0xba, 0x32, 0x76, 0xc5, 0x74, 0x18, 0x78, 0xf0, 0x94, 0x92, 0x7e, 0x28, 0x7b, 0x5d, 0x78, + 0x46, 0xd1, 0xd4, 0x0f, 0xe5, 0x00, 0x9e, 0xeb, 0x9c, 0x65, 0xc3, 0x0b, 0x5e, 0x65, 0x27, 0x45, + 0x34, 0x80, 0x32, 0xa9, 0x5c, 0xc9, 0xc8, 0x0f, 0xdf, 0xc3, 0x09, 0x07, 0x56, 0x73, 0x65, 0x7c, + 0x29, 0x3e, 0x8e, 0x03, 0xef, 0x93, 0xdd, 0x87, 0x8a, 0x7e, 0x6b, 0xf7, 0xe1, 0x94, 0xde, 0x0e, + 0x85, 0x08, 0x80, 0x51, 0x6a, 0x82, 0x4a, 0x55, 0x52, 0x9a, 0x14, 0x3d, 0x6b, 0x87, 0xb0, 0x30, + 0x54, 0xe7, 0x4d, 0x56, 0xdd, 0x3f, 0x44, 0xf3, 0x96, 0x0d, 0x0d, 0x23, 0x6f, 0xd9, 0xd0, 0xa4, + 0x7f, 0xc5, 0xd7, 0x96, 0x0d, 0x40, 0x69, 0x0a, 0x5b, 0xbc, 
0xc5, 0xea, 0xda, 0x97, 0xd5, 0x1d, + 0x00, 0x27, 0x2b, 0x1f, 0x9c, 0x60, 0x04, 0x67, 0x24, 0x1f, 0x79, 0x57, 0x62, 0x1a, 0x5d, 0x7a, + 0x70, 0x4e, 0x7a, 0xd7, 0x4e, 0xe4, 0x3b, 0xa1, 0x84, 0x0b, 0x43, 0xaf, 0xd7, 0x85, 0x97, 0x46, + 0x68, 0xf7, 0xe1, 0x15, 0xfd, 0x36, 0x9a, 0x8b, 0x23, 0x6f, 0x04, 0x09, 0x35, 0x2c, 0xa6, 0x8b, + 0x68, 0x4e, 0x45, 0xa8, 0x80, 0x64, 0x41, 0x04, 0xff, 0x18, 0xc9, 0x9d, 0xae, 0xb1, 0x6c, 0x24, + 0xf7, 0xe4, 0xec, 0x58, 0xf2, 0x85, 0x94, 0x8b, 0x89, 0x23, 0x5a, 0xf2, 0x73, 0x06, 0xe6, 0xd4, + 0x91, 0x3e, 0x68, 0x2d, 0x22, 0xab, 0xc3, 0x10, 0x85, 0x08, 0x10, 0x3c, 0x52, 0xc9, 0xe4, 0xa8, + 0xae, 0x48, 0x7d, 0xa2, 0x3d, 0xad, 0x0f, 0x48, 0x1b, 0xff, 0xca, 0xcf, 0x58, 0xd3, 0xd8, 0x06, + 0xc2, 0xd4, 0xa8, 0x23, 0x94, 0x71, 0xce, 0x1a, 0x7a, 0x2b, 0xc8, 0x72, 0x2a, 0x33, 0xd0, 0x86, + 0x5f, 0xb0, 0xd6, 0x5f, 0xdb, 0x41, 0xfc, 0x8d, 0xec, 0xee, 0x37, 0x84, 0x60, 0x4b, 0x6d, 0x0f, + 0x5b, 0x42, 0xf8, 0x9d, 0x7a, 0xd0, 0xa6, 0x90, 0xed, 0x8c, 0x1e, 0x64, 0xf9, 0x87, 0x81, 0x68, + 0x1c, 0x3f, 0x87, 0x4b, 0xf6, 0x5a, 0xa5, 0x8b, 0xb6, 0x3e, 0x9f, 0xf6, 0xf1, 0xc0, 0x86, 0xd5, + 0xfd, 0x05, 0x65, 0xe3, 0xfd, 0x81, 0x65, 0xe3, 0xd2, 0xe7, 0x77, 0x8b, 0xfb, 0xfc, 0x6e, 0x73, + 0xd3, 0xbe, 0x55, 0xab, 0x8e, 0x71, 0x95, 0xff, 0xff, 0x5c, 0xa8, 0x7f, 0xce, 0xf5, 0x77, 0xa9, + 0x74, 0x53, 0xc6, 0x63, 0xed, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0x68, 0xc3, 0x91, 0x21, 0xd5, + 0x03, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/types.proto b/executor/proto/tensorflow/core/framework/types.proto new file mode 100644 index 0000000000..5356f9f9c9 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/types.proto @@ -0,0 +1,76 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "TypesProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; + +// (== 
suppress_warning documentation-presence ==) +// LINT.IfChange +enum DataType { + // Not a legal value for DataType. Used to indicate a DataType field + // has not been set. + DT_INVALID = 0; + + // Data types that all computation devices are expected to be + // capable to support. + DT_FLOAT = 1; + DT_DOUBLE = 2; + DT_INT32 = 3; + DT_UINT8 = 4; + DT_INT16 = 5; + DT_INT8 = 6; + DT_STRING = 7; + DT_COMPLEX64 = 8; // Single-precision complex + DT_INT64 = 9; + DT_BOOL = 10; + DT_QINT8 = 11; // Quantized int8 + DT_QUINT8 = 12; // Quantized uint8 + DT_QINT32 = 13; // Quantized int32 + DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops. + DT_QINT16 = 15; // Quantized int16 + DT_QUINT16 = 16; // Quantized uint16 + DT_UINT16 = 17; + DT_COMPLEX128 = 18; // Double-precision complex + DT_HALF = 19; + DT_RESOURCE = 20; + DT_VARIANT = 21; // Arbitrary C++ data types + DT_UINT32 = 22; + DT_UINT64 = 23; + + // Do not use! These are only for parameters. Every enum above + // should have a corresponding value below (verified by types_test). 
+ DT_FLOAT_REF = 101; + DT_DOUBLE_REF = 102; + DT_INT32_REF = 103; + DT_UINT8_REF = 104; + DT_INT16_REF = 105; + DT_INT8_REF = 106; + DT_STRING_REF = 107; + DT_COMPLEX64_REF = 108; + DT_INT64_REF = 109; + DT_BOOL_REF = 110; + DT_QINT8_REF = 111; + DT_QUINT8_REF = 112; + DT_QINT32_REF = 113; + DT_BFLOAT16_REF = 114; + DT_QINT16_REF = 115; + DT_QUINT16_REF = 116; + DT_UINT16_REF = 117; + DT_COMPLEX128_REF = 118; + DT_HALF_REF = 119; + DT_RESOURCE_REF = 120; + DT_VARIANT_REF = 121; + DT_UINT32_REF = 122; + DT_UINT64_REF = 123; +} +// LINT.ThenChange( +// https://www.tensorflow.org/code/tensorflow/c/tf_datatype.h, +// https://www.tensorflow.org/code/tensorflow/go/tensor.go, +// https://www.tensorflow.org/code/tensorflow/core/framework/tensor.cc, +// https://www.tensorflow.org/code/tensorflow/core/framework/types.h, +// https://www.tensorflow.org/code/tensorflow/core/framework/types.cc, +// https://www.tensorflow.org/code/tensorflow/python/framework/dtypes.py, +// https://www.tensorflow.org/code/tensorflow/python/framework/function.py) diff --git a/executor/proto/tensorflow/core/framework/variable.pb.go b/executor/proto/tensorflow/core/framework/variable.pb.go new file mode 100644 index 0000000000..0b4e76d55f --- /dev/null +++ b/executor/proto/tensorflow/core/framework/variable.pb.go @@ -0,0 +1,334 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/variable.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Indicates when a distributed variable will be synced. +type VariableSynchronization int32 + +const ( + // `AUTO`: Indicates that the synchronization will be determined by the + // current `DistributionStrategy` (eg. With `MirroredStrategy` this would be + // `ON_WRITE`). + VariableSynchronization_VARIABLE_SYNCHRONIZATION_AUTO VariableSynchronization = 0 + // `NONE`: Indicates that there will only be one copy of the variable, so + // there is no need to sync. + VariableSynchronization_VARIABLE_SYNCHRONIZATION_NONE VariableSynchronization = 1 + // `ON_WRITE`: Indicates that the variable will be updated across devices + // every time it is written. + VariableSynchronization_VARIABLE_SYNCHRONIZATION_ON_WRITE VariableSynchronization = 2 + // `ON_READ`: Indicates that the variable will be aggregated across devices + // when it is read (eg. when checkpointing or when evaluating an op that uses + // the variable). + VariableSynchronization_VARIABLE_SYNCHRONIZATION_ON_READ VariableSynchronization = 3 +) + +var VariableSynchronization_name = map[int32]string{ + 0: "VARIABLE_SYNCHRONIZATION_AUTO", + 1: "VARIABLE_SYNCHRONIZATION_NONE", + 2: "VARIABLE_SYNCHRONIZATION_ON_WRITE", + 3: "VARIABLE_SYNCHRONIZATION_ON_READ", +} + +var VariableSynchronization_value = map[string]int32{ + "VARIABLE_SYNCHRONIZATION_AUTO": 0, + "VARIABLE_SYNCHRONIZATION_NONE": 1, + "VARIABLE_SYNCHRONIZATION_ON_WRITE": 2, + "VARIABLE_SYNCHRONIZATION_ON_READ": 3, +} + +func (x VariableSynchronization) String() string { + return proto.EnumName(VariableSynchronization_name, int32(x)) +} + +func (VariableSynchronization) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_908f2d03adae2778, []int{0} +} + +// Indicates how a distributed variable will be aggregated. +type VariableAggregation int32 + +const ( + // `NONE`: This is the default, giving an error if you use a + // variable-update operation with multiple replicas. 
+ VariableAggregation_VARIABLE_AGGREGATION_NONE VariableAggregation = 0 + // `SUM`: Add the updates across replicas. + VariableAggregation_VARIABLE_AGGREGATION_SUM VariableAggregation = 1 + // `MEAN`: Take the arithmetic mean ("average") of the updates across + // replicas. + VariableAggregation_VARIABLE_AGGREGATION_MEAN VariableAggregation = 2 + // `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same + // update, but we only want to perform the update once. Used, e.g., for the + // global step counter. + VariableAggregation_VARIABLE_AGGREGATION_ONLY_FIRST_REPLICA VariableAggregation = 3 +) + +var VariableAggregation_name = map[int32]string{ + 0: "VARIABLE_AGGREGATION_NONE", + 1: "VARIABLE_AGGREGATION_SUM", + 2: "VARIABLE_AGGREGATION_MEAN", + 3: "VARIABLE_AGGREGATION_ONLY_FIRST_REPLICA", +} + +var VariableAggregation_value = map[string]int32{ + "VARIABLE_AGGREGATION_NONE": 0, + "VARIABLE_AGGREGATION_SUM": 1, + "VARIABLE_AGGREGATION_MEAN": 2, + "VARIABLE_AGGREGATION_ONLY_FIRST_REPLICA": 3, +} + +func (x VariableAggregation) String() string { + return proto.EnumName(VariableAggregation_name, int32(x)) +} + +func (VariableAggregation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_908f2d03adae2778, []int{1} +} + +// Protocol buffer representing a Variable. +type VariableDef struct { + // Name of the variable tensor. + VariableName string `protobuf:"bytes,1,opt,name=variable_name,json=variableName,proto3" json:"variable_name,omitempty"` + // Name of the tensor holding the variable's initial value. + InitialValueName string `protobuf:"bytes,6,opt,name=initial_value_name,json=initialValueName,proto3" json:"initial_value_name,omitempty"` + // Name of the initializer op. + InitializerName string `protobuf:"bytes,2,opt,name=initializer_name,json=initializerName,proto3" json:"initializer_name,omitempty"` + // Name of the snapshot tensor. 
+ SnapshotName string `protobuf:"bytes,3,opt,name=snapshot_name,json=snapshotName,proto3" json:"snapshot_name,omitempty"` + // Support for saving variables as slices of a larger variable. + SaveSliceInfoDef *SaveSliceInfoDef `protobuf:"bytes,4,opt,name=save_slice_info_def,json=saveSliceInfoDef,proto3" json:"save_slice_info_def,omitempty"` + // Whether to represent this as a ResourceVariable. + IsResource bool `protobuf:"varint,5,opt,name=is_resource,json=isResource,proto3" json:"is_resource,omitempty"` + // Whether this variable should be trained. + Trainable bool `protobuf:"varint,7,opt,name=trainable,proto3" json:"trainable,omitempty"` + // Indicates when a distributed variable will be synced. + Synchronization VariableSynchronization `protobuf:"varint,8,opt,name=synchronization,proto3,enum=tensorflow.VariableSynchronization" json:"synchronization,omitempty"` + // Indicates how a distributed variable will be aggregated. + Aggregation VariableAggregation `protobuf:"varint,9,opt,name=aggregation,proto3,enum=tensorflow.VariableAggregation" json:"aggregation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VariableDef) Reset() { *m = VariableDef{} } +func (m *VariableDef) String() string { return proto.CompactTextString(m) } +func (*VariableDef) ProtoMessage() {} +func (*VariableDef) Descriptor() ([]byte, []int) { + return fileDescriptor_908f2d03adae2778, []int{0} +} + +func (m *VariableDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VariableDef.Unmarshal(m, b) +} +func (m *VariableDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VariableDef.Marshal(b, m, deterministic) +} +func (m *VariableDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_VariableDef.Merge(m, src) +} +func (m *VariableDef) XXX_Size() int { + return xxx_messageInfo_VariableDef.Size(m) +} +func (m *VariableDef) XXX_DiscardUnknown() { + 
xxx_messageInfo_VariableDef.DiscardUnknown(m) +} + +var xxx_messageInfo_VariableDef proto.InternalMessageInfo + +func (m *VariableDef) GetVariableName() string { + if m != nil { + return m.VariableName + } + return "" +} + +func (m *VariableDef) GetInitialValueName() string { + if m != nil { + return m.InitialValueName + } + return "" +} + +func (m *VariableDef) GetInitializerName() string { + if m != nil { + return m.InitializerName + } + return "" +} + +func (m *VariableDef) GetSnapshotName() string { + if m != nil { + return m.SnapshotName + } + return "" +} + +func (m *VariableDef) GetSaveSliceInfoDef() *SaveSliceInfoDef { + if m != nil { + return m.SaveSliceInfoDef + } + return nil +} + +func (m *VariableDef) GetIsResource() bool { + if m != nil { + return m.IsResource + } + return false +} + +func (m *VariableDef) GetTrainable() bool { + if m != nil { + return m.Trainable + } + return false +} + +func (m *VariableDef) GetSynchronization() VariableSynchronization { + if m != nil { + return m.Synchronization + } + return VariableSynchronization_VARIABLE_SYNCHRONIZATION_AUTO +} + +func (m *VariableDef) GetAggregation() VariableAggregation { + if m != nil { + return m.Aggregation + } + return VariableAggregation_VARIABLE_AGGREGATION_NONE +} + +type SaveSliceInfoDef struct { + // Name of the full variable of which this is a slice. + FullName string `protobuf:"bytes,1,opt,name=full_name,json=fullName,proto3" json:"full_name,omitempty"` + // Shape of the full variable. + FullShape []int64 `protobuf:"varint,2,rep,packed,name=full_shape,json=fullShape,proto3" json:"full_shape,omitempty"` + // Offset of this variable into the full variable. + VarOffset []int64 `protobuf:"varint,3,rep,packed,name=var_offset,json=varOffset,proto3" json:"var_offset,omitempty"` + // Shape of this variable. 
+ VarShape []int64 `protobuf:"varint,4,rep,packed,name=var_shape,json=varShape,proto3" json:"var_shape,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SaveSliceInfoDef) Reset() { *m = SaveSliceInfoDef{} } +func (m *SaveSliceInfoDef) String() string { return proto.CompactTextString(m) } +func (*SaveSliceInfoDef) ProtoMessage() {} +func (*SaveSliceInfoDef) Descriptor() ([]byte, []int) { + return fileDescriptor_908f2d03adae2778, []int{1} +} + +func (m *SaveSliceInfoDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SaveSliceInfoDef.Unmarshal(m, b) +} +func (m *SaveSliceInfoDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SaveSliceInfoDef.Marshal(b, m, deterministic) +} +func (m *SaveSliceInfoDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_SaveSliceInfoDef.Merge(m, src) +} +func (m *SaveSliceInfoDef) XXX_Size() int { + return xxx_messageInfo_SaveSliceInfoDef.Size(m) +} +func (m *SaveSliceInfoDef) XXX_DiscardUnknown() { + xxx_messageInfo_SaveSliceInfoDef.DiscardUnknown(m) +} + +var xxx_messageInfo_SaveSliceInfoDef proto.InternalMessageInfo + +func (m *SaveSliceInfoDef) GetFullName() string { + if m != nil { + return m.FullName + } + return "" +} + +func (m *SaveSliceInfoDef) GetFullShape() []int64 { + if m != nil { + return m.FullShape + } + return nil +} + +func (m *SaveSliceInfoDef) GetVarOffset() []int64 { + if m != nil { + return m.VarOffset + } + return nil +} + +func (m *SaveSliceInfoDef) GetVarShape() []int64 { + if m != nil { + return m.VarShape + } + return nil +} + +func init() { + proto.RegisterEnum("tensorflow.VariableSynchronization", VariableSynchronization_name, VariableSynchronization_value) + proto.RegisterEnum("tensorflow.VariableAggregation", VariableAggregation_name, VariableAggregation_value) + proto.RegisterType((*VariableDef)(nil), "tensorflow.VariableDef") + 
proto.RegisterType((*SaveSliceInfoDef)(nil), "tensorflow.SaveSliceInfoDef") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/variable.proto", fileDescriptor_908f2d03adae2778) +} + +var fileDescriptor_908f2d03adae2778 = []byte{ + // 567 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0xd1, 0x4e, 0xdb, 0x30, + 0x14, 0x86, 0x31, 0x61, 0xac, 0x3d, 0x1d, 0x10, 0x99, 0x8b, 0x65, 0x1a, 0x88, 0x02, 0x9b, 0xd6, + 0xb1, 0xa9, 0x95, 0xd8, 0xf5, 0x2e, 0x02, 0x64, 0x2c, 0x1a, 0x24, 0xc8, 0x29, 0x4c, 0x70, 0x63, + 0x99, 0xce, 0x69, 0xad, 0xa5, 0x31, 0xb2, 0xd3, 0xa0, 0xf1, 0x08, 0x7b, 0x88, 0x3d, 0xc1, 0x9e, + 0x65, 0xcf, 0xb3, 0xcb, 0x29, 0x6e, 0x43, 0xb3, 0xaa, 0x70, 0x57, 0xfd, 0xff, 0xf7, 0x9f, 0xfa, + 0x1c, 0xc7, 0x07, 0x5a, 0x19, 0x4f, 0xb5, 0x54, 0x71, 0x22, 0x6f, 0x3b, 0x3d, 0xa9, 0x78, 0x27, + 0x56, 0x6c, 0xc8, 0x6f, 0xa5, 0xfa, 0xde, 0xc9, 0x99, 0x12, 0xec, 0x3a, 0xe1, 0xed, 0x1b, 0x25, + 0x33, 0x89, 0x61, 0x4a, 0xee, 0xfc, 0xb1, 0xa0, 0x71, 0x31, 0xb1, 0x8f, 0x78, 0x8c, 0x77, 0x61, + 0xa5, 0xa4, 0x69, 0xca, 0x86, 0xdc, 0x41, 0x4d, 0xd4, 0xaa, 0x93, 0x67, 0xa5, 0x18, 0xb0, 0x21, + 0xc7, 0xef, 0x01, 0x8b, 0x54, 0x64, 0x82, 0x25, 0x34, 0x67, 0xc9, 0x68, 0x42, 0x2e, 0x1b, 0xd2, + 0x9e, 0x38, 0x17, 0x85, 0x61, 0xe8, 0xb7, 0x50, 0x6a, 0xe2, 0x8e, 0xab, 0x31, 0xbb, 0x68, 0xd8, + 0xb5, 0x8a, 0x6e, 0xd0, 0x5d, 0x58, 0xd1, 0x29, 0xbb, 0xd1, 0x03, 0x99, 0x8d, 0x39, 0x6b, 0xfc, + 0xef, 0xa5, 0x68, 0xa0, 0x2f, 0xb0, 0xae, 0x59, 0xce, 0xa9, 0x4e, 0x44, 0x8f, 0x53, 0x91, 0xc6, + 0x92, 0x7e, 0xe3, 0xb1, 0xb3, 0xd4, 0x44, 0xad, 0xc6, 0xfe, 0x46, 0x7b, 0xda, 0x5c, 0x3b, 0x62, + 0x39, 0x8f, 0x0a, 0xca, 0x4f, 0x63, 0x79, 0xc4, 0x63, 0x62, 0xeb, 0x19, 0x05, 0x6f, 0x41, 0x43, + 0x68, 0xaa, 0xb8, 0x96, 0x23, 0xd5, 0xe3, 0xce, 0x93, 0x26, 0x6a, 0xd5, 0x08, 0x08, 0x4d, 0x26, + 0x0a, 0xde, 0x80, 0x7a, 0xa6, 0x98, 0x48, 0x8b, 0xe6, 0x9d, 0xa7, 0xc6, 0x9e, 0x0a, 0xf8, 0x14, + 0xd6, 0xf4, 0x8f, 0xb4, 0x37, 0x50, 
0x32, 0x15, 0x77, 0x2c, 0x13, 0x32, 0x75, 0x6a, 0x4d, 0xd4, + 0x5a, 0xdd, 0xdf, 0xad, 0x9e, 0xa3, 0x1c, 0x70, 0xf4, 0x3f, 0x4a, 0x66, 0xb3, 0xd8, 0x85, 0x06, + 0xeb, 0xf7, 0x15, 0xef, 0x8f, 0x4b, 0xd5, 0x4d, 0xa9, 0xad, 0x79, 0xa5, 0xdc, 0x29, 0x46, 0xaa, + 0x99, 0x9d, 0x9f, 0x08, 0xec, 0xd9, 0xbe, 0xf1, 0x4b, 0xa8, 0xc7, 0xa3, 0x24, 0xa9, 0xde, 0x68, + 0xad, 0x10, 0xcc, 0x3c, 0x37, 0x01, 0x8c, 0xa9, 0x07, 0xec, 0xa6, 0xb8, 0x19, 0xab, 0x65, 0x11, + 0x83, 0x47, 0x85, 0x50, 0xd8, 0x39, 0x53, 0x54, 0xc6, 0xb1, 0xe6, 0x99, 0x63, 0x8d, 0xed, 0x9c, + 0xa9, 0xd0, 0x08, 0x45, 0xe9, 0xc2, 0x1e, 0x87, 0x97, 0x8c, 0x5b, 0xcb, 0x99, 0x32, 0xd9, 0xbd, + 0xdf, 0x08, 0x9e, 0x3f, 0xd0, 0x3c, 0xde, 0x86, 0xcd, 0x0b, 0x97, 0xf8, 0xee, 0xc1, 0x89, 0x47, + 0xa3, 0xcb, 0xe0, 0xf0, 0x33, 0x09, 0x03, 0xff, 0xca, 0xed, 0xfa, 0x61, 0x40, 0xdd, 0xf3, 0x6e, + 0x68, 0x2f, 0x3c, 0x8a, 0x04, 0x61, 0xe0, 0xd9, 0x08, 0xbf, 0x86, 0xed, 0x07, 0x91, 0x30, 0xa0, + 0x5f, 0x89, 0xdf, 0xf5, 0xec, 0x45, 0xfc, 0x0a, 0x9a, 0x8f, 0x61, 0xc4, 0x73, 0x8f, 0x6c, 0x6b, + 0xef, 0x17, 0x82, 0xf5, 0x39, 0x03, 0xc6, 0x9b, 0xf0, 0xe2, 0x3e, 0xed, 0x1e, 0x1f, 0x13, 0xef, + 0xb8, 0x72, 0x86, 0x05, 0xbc, 0x01, 0xce, 0x5c, 0x3b, 0x3a, 0x3f, 0xb5, 0xd1, 0x83, 0xe1, 0x53, + 0xcf, 0x0d, 0xec, 0x45, 0xfc, 0x0e, 0xde, 0xcc, 0xb5, 0xc3, 0xe0, 0xe4, 0x92, 0x7e, 0xf2, 0x49, + 0xd4, 0xa5, 0xc4, 0x3b, 0x3b, 0xf1, 0x0f, 0x5d, 0xdb, 0x3a, 0x48, 0xc1, 0x91, 0xaa, 0x5f, 0xfd, + 0x1e, 0xee, 0x1f, 0xf9, 0xc1, 0x6a, 0x79, 0xf2, 0xb3, 0xe2, 0x91, 0xeb, 0x33, 0x74, 0xf5, 0xb1, + 0x2f, 0xb2, 0xc1, 0xe8, 0xba, 0xdd, 0x93, 0xc3, 0x4e, 0x65, 0x39, 0xcc, 0xff, 0xd9, 0x97, 0x33, + 0x5b, 0xe3, 0x2f, 0x42, 0xd7, 0xcb, 0x66, 0x61, 0x7c, 0xf8, 0x17, 0x00, 0x00, 0xff, 0xff, 0xcc, + 0xac, 0xce, 0x1a, 0x5c, 0x04, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/framework/variable.proto b/executor/proto/tensorflow/core/framework/variable.proto new file mode 100644 index 0000000000..b2978c75c3 --- /dev/null +++ 
b/executor/proto/tensorflow/core/framework/variable.proto @@ -0,0 +1,85 @@ +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; +option java_outer_classname = "VariableProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; + +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; + +// Indicates when a distributed variable will be synced. +enum VariableSynchronization { + // `AUTO`: Indicates that the synchronization will be determined by the + // current `DistributionStrategy` (eg. With `MirroredStrategy` this would be + // `ON_WRITE`). + VARIABLE_SYNCHRONIZATION_AUTO = 0; + // `NONE`: Indicates that there will only be one copy of the variable, so + // there is no need to sync. + VARIABLE_SYNCHRONIZATION_NONE = 1; + // `ON_WRITE`: Indicates that the variable will be updated across devices + // every time it is written. + VARIABLE_SYNCHRONIZATION_ON_WRITE = 2; + // `ON_READ`: Indicates that the variable will be aggregated across devices + // when it is read (eg. when checkpointing or when evaluating an op that uses + // the variable). + VARIABLE_SYNCHRONIZATION_ON_READ = 3; +} + +// Indicates how a distributed variable will be aggregated. +enum VariableAggregation { + // `NONE`: This is the default, giving an error if you use a + // variable-update operation with multiple replicas. + VARIABLE_AGGREGATION_NONE = 0; + // `SUM`: Add the updates across replicas. + VARIABLE_AGGREGATION_SUM = 1; + // `MEAN`: Take the arithmetic mean ("average") of the updates across + // replicas. + VARIABLE_AGGREGATION_MEAN = 2; + // `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same + // update, but we only want to perform the update once. Used, e.g., for the + // global step counter. + VARIABLE_AGGREGATION_ONLY_FIRST_REPLICA = 3; +} + +// Protocol buffer representing a Variable. +message VariableDef { + // Name of the variable tensor. 
+ string variable_name = 1; + + // Name of the tensor holding the variable's initial value. + string initial_value_name = 6; + + // Name of the initializer op. + string initializer_name = 2; + + // Name of the snapshot tensor. + string snapshot_name = 3; + + // Support for saving variables as slices of a larger variable. + SaveSliceInfoDef save_slice_info_def = 4; + + // Whether to represent this as a ResourceVariable. + bool is_resource = 5; + + // Whether this variable should be trained. + bool trainable = 7; + + // Indicates when a distributed variable will be synced. + VariableSynchronization synchronization = 8; + + // Indicates how a distributed variable will be aggregated. + VariableAggregation aggregation = 9; +} + +message SaveSliceInfoDef { + // Name of the full variable of which this is a slice. + string full_name = 1; + // Shape of the full variable. + repeated int64 full_shape = 2; + // Offset of this variable into the full variable. + repeated int64 var_offset = 3; + // Shape of this variable. + repeated int64 var_shape = 4; +} diff --git a/executor/proto/tensorflow/core/framework/versions.pb.go b/executor/proto/tensorflow/core/framework/versions.pb.go new file mode 100644 index 0000000000..d102539421 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/versions.pb.go @@ -0,0 +1,117 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/framework/versions.proto + +package framework + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Version information for a piece of serialized data +// +// There are different types of versions for each type of data +// (GraphDef, etc.), but they all have the same common shape +// described here. +// +// Each consumer has "consumer" and "min_producer" versions (specified +// elsewhere). A consumer is allowed to consume this data if +// +// producer >= min_producer +// consumer >= min_consumer +// consumer not in bad_consumers +// +type VersionDef struct { + // The version of the code that produced this data. + Producer int32 `protobuf:"varint,1,opt,name=producer,proto3" json:"producer,omitempty"` + // Any consumer below this version is not allowed to consume this data. + MinConsumer int32 `protobuf:"varint,2,opt,name=min_consumer,json=minConsumer,proto3" json:"min_consumer,omitempty"` + // Specific consumer versions which are disallowed (e.g. due to bugs). + BadConsumers []int32 `protobuf:"varint,3,rep,packed,name=bad_consumers,json=badConsumers,proto3" json:"bad_consumers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionDef) Reset() { *m = VersionDef{} } +func (m *VersionDef) String() string { return proto.CompactTextString(m) } +func (*VersionDef) ProtoMessage() {} +func (*VersionDef) Descriptor() ([]byte, []int) { + return fileDescriptor_a28d4a384b75cac3, []int{0} +} + +func (m *VersionDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionDef.Unmarshal(m, b) +} +func (m *VersionDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionDef.Marshal(b, m, deterministic) +} +func (m *VersionDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionDef.Merge(m, src) +} +func (m *VersionDef) XXX_Size() int { + return xxx_messageInfo_VersionDef.Size(m) +} +func (m *VersionDef) XXX_DiscardUnknown() { + 
xxx_messageInfo_VersionDef.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionDef proto.InternalMessageInfo + +func (m *VersionDef) GetProducer() int32 { + if m != nil { + return m.Producer + } + return 0 +} + +func (m *VersionDef) GetMinConsumer() int32 { + if m != nil { + return m.MinConsumer + } + return 0 +} + +func (m *VersionDef) GetBadConsumers() []int32 { + if m != nil { + return m.BadConsumers + } + return nil +} + +func init() { + proto.RegisterType((*VersionDef)(nil), "tensorflow.VersionDef") +} + +func init() { + proto.RegisterFile("tensorflow/core/framework/versions.proto", fileDescriptor_a28d4a384b75cac3) +} + +var fileDescriptor_a28d4a384b75cac3 = []byte{ + // 205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0x31, 0x4f, 0x85, 0x30, + 0x14, 0x46, 0x53, 0x5f, 0x9e, 0x31, 0xd7, 0xa7, 0x43, 0xa7, 0xc6, 0x09, 0x75, 0x61, 0x6a, 0x07, + 0x67, 0x17, 0xf4, 0x07, 0x10, 0x06, 0x07, 0x17, 0x43, 0x4b, 0xc1, 0x46, 0xdb, 0x4b, 0xee, 0x05, + 0xf9, 0xeb, 0x8e, 0x46, 0x44, 0x30, 0xe6, 0x6d, 0xed, 0xfd, 0xce, 0x70, 0x0e, 0xe4, 0x83, 0x4f, + 0x8c, 0xd4, 0xbe, 0xe3, 0x64, 0x1c, 0x92, 0x37, 0x2d, 0xd5, 0xd1, 0x4f, 0x48, 0x6f, 0xe6, 0xc3, + 0x13, 0x07, 0x4c, 0xac, 0x7b, 0xc2, 0x01, 0x25, 0x6c, 0xe4, 0x4d, 0x0f, 0xf0, 0xf4, 0xb3, 0x3e, + 0xfa, 0x56, 0x5e, 0xc1, 0x59, 0x4f, 0xd8, 0x8c, 0xce, 0x93, 0x12, 0x99, 0xc8, 0xf7, 0xd5, 0xfa, + 0x97, 0xd7, 0x70, 0x88, 0x21, 0xbd, 0x38, 0x4c, 0x3c, 0x46, 0x4f, 0xea, 0x64, 0xde, 0xcf, 0x63, + 0x48, 0x0f, 0xcb, 0x49, 0xde, 0xc2, 0x85, 0xad, 0x9b, 0x15, 0x61, 0xb5, 0xcb, 0x76, 0xf9, 0xbe, + 0x3a, 0xd8, 0xba, 0xf9, 0x65, 0xb8, 0x48, 0xa0, 0x90, 0x3a, 0xbd, 0x39, 0xe8, 0x55, 0xb4, 0xb8, + 0x5c, 0x5c, 0xb8, 0xfc, 0x16, 0xe5, 0x52, 0x3c, 0xdf, 0x77, 0x61, 0x78, 0x1d, 0xad, 0x76, 0x18, + 0xcd, 0x9f, 0xc0, 0xe3, 0xcf, 0x0e, 0xff, 0x95, 0x7f, 0x0a, 0x61, 0x4f, 0xe7, 0xe8, 0xbb, 0xaf, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x36, 0x38, 0x93, 0x7d, 0x20, 0x01, 0x00, 0x00, +} diff --git 
a/executor/proto/tensorflow/core/framework/versions.proto b/executor/proto/tensorflow/core/framework/versions.proto new file mode 100644 index 0000000000..dd2ec55238 --- /dev/null +++ b/executor/proto/tensorflow/core/framework/versions.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "VersionsProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework"; + +// Version information for a piece of serialized data +// +// There are different types of versions for each type of data +// (GraphDef, etc.), but they all have the same common shape +// described here. +// +// Each consumer has "consumer" and "min_producer" versions (specified +// elsewhere). A consumer is allowed to consume this data if +// +// producer >= min_producer +// consumer >= min_consumer +// consumer not in bad_consumers +// +message VersionDef { + // The version of the code that produced this data. + int32 producer = 1; + + // Any consumer below this version is not allowed to consume this data. + int32 min_consumer = 2; + + // Specific consumer versions which are disallowed (e.g. due to bugs). + repeated int32 bad_consumers = 3; +}; diff --git a/executor/proto/tensorflow/core/go.mod b/executor/proto/tensorflow/core/go.mod new file mode 100644 index 0000000000..c67f6808df --- /dev/null +++ b/executor/proto/tensorflow/core/go.mod @@ -0,0 +1,3 @@ +module github.com/tensorflow/tensorflow/tensorflow/go/core + +go 1.12 diff --git a/executor/proto/tensorflow/core/grappler/costs/op_performance_data.proto b/executor/proto/tensorflow/core/grappler/costs/op_performance_data.proto new file mode 100644 index 0000000000..5ef5fd927b --- /dev/null +++ b/executor/proto/tensorflow/core/grappler/costs/op_performance_data.proto @@ -0,0 +1,123 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; + +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; +import "tensorflow/core/framework/attr_value.proto"; +import "tensorflow/core/protobuf/device_properties.proto"; + +// Description of the session when an op is run. +message SessionInfo { + int64 intra_op_parallelism = 1; +} + +// Description of an operation as well as the parameters expected to impact its +// performance. +message OpInfo { + // The operation name. There may be custom parameters in attrs. + string op = 1; + + // Custom parameters impacting the behavior of the op. + map attr = 2; + + // Input data types, shapes and values if known. + message TensorProperties { + DataType dtype = 1; + TensorShapeProto shape = 2; + TensorProto value = 3; + }; + repeated TensorProperties inputs = 3; + + // Optional description of the op outputs + repeated TensorProperties outputs = 5; + + // Device on which the operation is run. + DeviceProperties device = 4; + + // Information about the session configs. 
+ SessionInfo session_info = 6; +} + +message NormalDistribution { + double mu = 1; + double sigma = 2; +} + +message LogNormalDistribution { + double mu = 1; + double sigma = 2; +} + +// Performance data for tensorflow operations +message OpPerformance { + // The op + OpInfo op = 1; + + // Information about the session configs. + SessionInfo session_info = 12 [deprecated = true]; + + // The node name (optional). Makes it easier to associate the performance data + // with a specific graph node. + string node = 5; + + // Temporary memory used by this node (in bytes). + int64 temporary_memory_size = 2; + + // Time it takes to run the op (in nanoseconds). + int64 compute_cost = 3; + + // Analytical compute cost (in nanoseconds). + int64 compute_time = 6; + + // Analytical memory access cost (in nanoseconds). + int64 memory_time = 7; + + // Percentage of theoretical compute performance. + double compute_efficiency = 4; + + // Percentage of theoretical memory performance. + double memory_efficiency = 8; + + // Expected execution time, modeled using one of 2 possible distributions. + oneof execution_time { + NormalDistribution execution_time_normal = 10; + LogNormalDistribution execution_time_log_normal = 11; + }; + + // Memory usage data for a tensorflow operation. + message OpMemory { + // The output information may have memory usage and output shapes. + repeated int64 output_memory = 1; + + // Temp and persistent memory allocated by this node. + int64 temp_memory = 2; + int64 persistent_memory = 4; + + int64 device_temp_memory = 3 [deprecated = true]; + int64 device_persistent_memory = 5 [deprecated = true]; + } + OpMemory op_memory = 9; +} + +// A collection of OpPerformance data points. 
+message OpPerformanceList { + repeated OpPerformance op_performance = 1; +} diff --git a/executor/proto/tensorflow/core/kernels/boosted_trees/boosted_trees.proto b/executor/proto/tensorflow/core/kernels/boosted_trees/boosted_trees.proto new file mode 100644 index 0000000000..cd64effa5d --- /dev/null +++ b/executor/proto/tensorflow/core/kernels/boosted_trees/boosted_trees.proto @@ -0,0 +1,182 @@ +syntax = "proto3"; + +package tensorflow.boosted_trees; + +option cc_enable_arenas = true; +option java_outer_classname = "BoostedTreesProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; + +// Node describes a node in a tree. +message Node { + oneof node { + Leaf leaf = 1; + BucketizedSplit bucketized_split = 2; + CategoricalSplit categorical_split = 3; + DenseSplit dense_split = 4; + } + NodeMetadata metadata = 777; +} + +// NodeMetadata encodes metadata associated with each node in a tree. +message NodeMetadata { + // The gain associated with this node. + float gain = 1; + + // The original leaf node before this node was split. + Leaf original_leaf = 2; +} + +// Leaves can either hold dense or sparse information. +message Leaf { + oneof leaf { + // See third_party/tensorflow/contrib/decision_trees/ + // proto/generic_tree_model.proto + // for a description of how vector and sparse_vector might be used. + Vector vector = 1; + SparseVector sparse_vector = 2; + } + float scalar = 3; +} + +message Vector { + repeated float value = 1; +} + +message SparseVector { + repeated int32 index = 1; + repeated float value = 2; +} + +enum SplitTypeWithDefault { + INEQUALITY_DEFAULT_LEFT = 0; + INEQUALITY_DEFAULT_RIGHT = 1; + EQUALITY_DEFAULT_RIGHT = 3; +} + +enum DefaultDirection { + // Left is the default direction. + DEFAULT_LEFT = 0; + DEFAULT_RIGHT = 1; +} + +message BucketizedSplit { + // Float feature column and split threshold describing + // the rule feature <= threshold. 
+ int32 feature_id = 1; + int32 threshold = 2; + // If feature column is multivalent, this holds the index of the dimension + // for the split. Defaults to 0. + int32 dimension_id = 5; + // default direction for missing values. + DefaultDirection default_direction = 6; + + // Node children indexing into a contiguous + // vector of nodes starting from the root. + int32 left_id = 3; + int32 right_id = 4; +} + +message CategoricalSplit { + // Categorical feature column and split describing the rule feature value == + // value. + int32 feature_id = 1; + int32 value = 2; + // If feature column is multivalent, this holds the index of the dimension + // for the split. Defaults to 0. + int32 dimension_id = 5; + + // Node children indexing into a contiguous + // vector of nodes starting from the root. + int32 left_id = 3; + int32 right_id = 4; +} + +// TODO(nponomareva): move out of boosted_trees and rename to trees.proto +message DenseSplit { + // Float feature column and split threshold describing + // the rule feature <= threshold. + int32 feature_id = 1; + float threshold = 2; + + // Node children indexing into a contiguous + // vector of nodes starting from the root. + int32 left_id = 3; + int32 right_id = 4; +} + +// Tree describes a list of connected nodes. +// Node 0 must be the root and can carry any payload including a leaf +// in the case of representing the bias. +// Note that each node id is implicitly its index in the list of nodes. +message Tree { + repeated Node nodes = 1; +} + +message TreeMetadata { + // Number of layers grown for this tree. + int32 num_layers_grown = 2; + + // Whether the tree is finalized in that no more layers can be grown. + bool is_finalized = 3; + + // If tree was finalized and post pruning happened, it is possible that cache + // still refers to some nodes that were deleted or that the node ids changed + // (e.g. node id 5 became node id 2 due to pruning of the other branch). 
+ // The mapping below allows us to understand where the old ids now map to and + // how the values should be adjusted due to post-pruning. + // The size of the list should be equal to the number of nodes in the tree + // before post-pruning happened. + // If the node was pruned, it will have new_node_id equal to the id of a node + // that this node was collapsed into. For a node that didn't get pruned, it is + // possible that its id still changed, so new_node_id will have the + // corresponding id in the pruned tree. + // If post-pruning didn't happen, or it did and it had no effect (e.g. no + // nodes got pruned), this list will be empty. + repeated PostPruneNodeUpdate post_pruned_nodes_meta = 4; + + message PostPruneNodeUpdate { + int32 new_node_id = 1; + repeated float logit_change = 2; + } +} + +message GrowingMetadata { + // Number of trees that we have attempted to build. After pruning, these + // trees might have been removed. + int64 num_trees_attempted = 1; + // Number of layers that we have attempted to build. After pruning, these + // layers might have been removed. + int64 num_layers_attempted = 2; + // The start (inclusive) and end (exclusive) ids of the nodes in the latest + // layer of the latest tree. + int32 last_layer_node_start = 3; + int32 last_layer_node_end = 4; +} + +// TreeEnsemble describes an ensemble of decision trees. +message TreeEnsemble { + repeated Tree trees = 1; + repeated float tree_weights = 2; + + repeated TreeMetadata tree_metadata = 3; + // Metadata that is used during the training. + GrowingMetadata growing_metadata = 4; +} + +// DebugOutput contains outputs useful for debugging/model interpretation, at +// the individual example-level. Debug outputs that are available to the user +// are: 1) Directional feature contributions (DFCs) 2) Node IDs for ensemble +// prediction path 3) Leaf node IDs. 
+message DebugOutput { + // Return the logits and associated feature splits across prediction paths for + // each tree, for every example, at predict time. We will use these values to + // compute DFCs in Python, by subtracting each child prediction from its + // parent prediction and associating this change with its respective feature + // id. + repeated int32 feature_ids = 1; + repeated float logits_path = 2; + + // TODO(crawles): return 2) Node IDs for ensemble prediction path 3) Leaf node + // IDs. +} diff --git a/executor/proto/tensorflow/core/lib/core/error_codes.pb.go b/executor/proto/tensorflow/core/lib/core/error_codes.pb.go new file mode 100644 index 0000000000..8bc9710fc3 --- /dev/null +++ b/executor/proto/tensorflow/core/lib/core/error_codes.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/lib/core/error_codes.proto + +package core + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The canonical error codes for TensorFlow APIs. +// +// Warnings: +// +// - Do not change any numeric assignments. +// - Changes to this list should only be made if there is a compelling +// need that can't be satisfied in another way. Such changes +// must be approved by at least two OWNERS. +// +// Sometimes multiple error codes may apply. Services should return +// the most specific error code that applies. For example, prefer +// OUT_OF_RANGE over FAILED_PRECONDITION if both codes apply. 
+// Similarly prefer NOT_FOUND or ALREADY_EXISTS over FAILED_PRECONDITION. +type Code int32 + +const ( + // Not an error; returned on success + Code_OK Code = 0 + // The operation was cancelled (typically by the caller). + Code_CANCELLED Code = 1 + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Code_UNKNOWN Code = 2 + // Client specified an invalid argument. Note that this differs + // from FAILED_PRECONDITION. INVALID_ARGUMENT indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + Code_INVALID_ARGUMENT Code = 3 + // Deadline expired before operation could complete. For operations + // that change the state of the system, this error may be returned + // even if the operation has completed successfully. For example, a + // successful response from a server could have been delayed long + // enough for the deadline to expire. + Code_DEADLINE_EXCEEDED Code = 4 + // Some requested entity (e.g., file or directory) was not found. + // For privacy reasons, this code *may* be returned when the client + // does not have the access right to the entity. + Code_NOT_FOUND Code = 5 + // Some entity that we attempted to create (e.g., file or directory) + // already exists. + Code_ALREADY_EXISTS Code = 6 + // The caller does not have permission to execute the specified + // operation. PERMISSION_DENIED must not be used for rejections + // caused by exhausting some resource (use RESOURCE_EXHAUSTED + // instead for those errors). PERMISSION_DENIED must not be + // used if the caller can not be identified (use UNAUTHENTICATED + // instead for those errors). 
+ Code_PERMISSION_DENIED Code = 7 + // The request does not have valid authentication credentials for the + // operation. + Code_UNAUTHENTICATED Code = 16 + // Some resource has been exhausted, perhaps a per-user quota, or + // perhaps the entire file system is out of space. + Code_RESOURCE_EXHAUSTED Code = 8 + // Operation was rejected because the system is not in a state + // required for the operation's execution. For example, directory + // to be deleted may be non-empty, an rmdir operation is applied to + // a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE: + // (a) Use UNAVAILABLE if the client can retry just the failing call. + // (b) Use ABORTED if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FAILED_PRECONDITION if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FAILED_PRECONDITION + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FAILED_PRECONDITION if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + Code_FAILED_PRECONDITION Code = 9 + // The operation was aborted, typically due to a concurrency issue + // like sequencer check failures, transaction aborts, etc. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + Code_ABORTED Code = 10 + // Operation tried to iterate past the valid input range. E.g., seeking or + // reading past end of file. + // + // Unlike INVALID_ARGUMENT, this error indicates a problem that may + // be fixed if the system state changes. 
For example, a 32-bit file + // system will generate INVALID_ARGUMENT if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OUT_OF_RANGE if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FAILED_PRECONDITION and + // OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OUT_OF_RANGE error to detect when + // they are done. + Code_OUT_OF_RANGE Code = 11 + // Operation is not implemented or not supported/enabled in this service. + Code_UNIMPLEMENTED Code = 12 + // Internal errors. Means some invariant expected by the underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Code_INTERNAL Code = 13 + // The service is currently unavailable. This is a most likely a + // transient condition and may be corrected by retrying with + // a backoff. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + Code_UNAVAILABLE Code = 14 + // Unrecoverable data loss or corruption. + Code_DATA_LOSS Code = 15 + // An extra enum entry to prevent people from writing code that + // fails to compile when a new code is added. + // + // Nobody should ever reference this enumeration entry. In particular, + // if you write C++ code that switches on this enumeration, add a default: + // case instead of a case that mentions this enumeration entry. + // + // Nobody should rely on the value (currently 20) listed here. It + // may change in the future. 
+ Code_DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_ Code = 20 +) + +var Code_name = map[int32]string{ + 0: "OK", + 1: "CANCELLED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 16: "UNAUTHENTICATED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", + 20: "DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_", +} + +var Code_value = map[string]int32{ + "OK": 0, + "CANCELLED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "UNAUTHENTICATED": 16, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, + "DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_": 20, +} + +func (x Code) String() string { + return proto.EnumName(Code_name, int32(x)) +} + +func (Code) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_212fd07356b95fd6, []int{0} +} + +func init() { + proto.RegisterEnum("tensorflow.error.Code", Code_name, Code_value) +} + +func init() { + proto.RegisterFile("tensorflow/core/lib/core/error_codes.proto", fileDescriptor_212fd07356b95fd6) +} + +var fileDescriptor_212fd07356b95fd6 = []byte{ + // 429 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xcb, 0x4e, 0x5b, 0x3d, + 0x10, 0xc7, 0xbf, 0x00, 0x1f, 0x17, 0x43, 0xc8, 0x60, 0xe8, 0xe5, 0x19, 0x58, 0x24, 0x8b, 0x6e, + 0xbb, 0x99, 0x1c, 0xcf, 0x49, 0xac, 0x38, 0xe3, 0x23, 0x5f, 0x02, 0xed, 0xc6, 0x6a, 0xc2, 0x21, + 0x45, 0x85, 0xba, 0x3a, 0x49, 0xc5, 0x0b, 0xf4, 0xa1, 0xbb, 0xac, 0x9c, 0x2e, 0x4a, 0xa5, 0xee, + 0x46, 0xf6, 0xdf, 0x9e, 0xf9, 0xe9, 0x37, 0xe2, 
0x7a, 0xdb, 0x7e, 0xdd, 0xe4, 0xee, 0xfe, 0x31, + 0x3f, 0x8f, 0x56, 0xb9, 0x6b, 0x47, 0x8f, 0x0f, 0xcb, 0xdf, 0x45, 0xdb, 0x75, 0xb9, 0x4b, 0xab, + 0x7c, 0xd7, 0x6e, 0x86, 0xdf, 0xba, 0xbc, 0xcd, 0x12, 0xfe, 0x64, 0x87, 0xbb, 0xdb, 0xeb, 0x1f, + 0xfb, 0xe2, 0xa0, 0xca, 0x77, 0xad, 0x3c, 0x14, 0x7b, 0x76, 0x06, 0xff, 0xc9, 0xbe, 0x38, 0xa9, + 0x90, 0x2b, 0x32, 0x86, 0x14, 0xf4, 0xe4, 0xa9, 0x38, 0x8a, 0x3c, 0x63, 0x7b, 0xc3, 0xb0, 0x27, + 0xaf, 0x04, 0x68, 0x5e, 0xa0, 0xd1, 0x2a, 0xa1, 0x9b, 0xc4, 0x39, 0x71, 0x80, 0x7d, 0xf9, 0x4a, + 0x5c, 0x28, 0x42, 0x65, 0x34, 0x53, 0xa2, 0xdb, 0x8a, 0x48, 0x91, 0x82, 0x83, 0xf2, 0x11, 0xdb, + 0x90, 0x6a, 0x1b, 0x59, 0xc1, 0xff, 0x52, 0x8a, 0x73, 0x34, 0x8e, 0x50, 0x7d, 0x48, 0x74, 0xab, + 0x7d, 0xf0, 0x70, 0x58, 0x5e, 0x36, 0xe4, 0xe6, 0xda, 0x7b, 0x6d, 0x39, 0x29, 0x62, 0x4d, 0x0a, + 0x8e, 0xe4, 0xa5, 0x18, 0x44, 0xc6, 0x18, 0xa6, 0xc4, 0x41, 0x57, 0x18, 0x48, 0x01, 0xc8, 0xd7, + 0x42, 0x3a, 0xf2, 0x36, 0xba, 0xaa, 0x74, 0x99, 0x62, 0xf4, 0xe5, 0xfc, 0x58, 0xbe, 0x11, 0x97, + 0x35, 0x6a, 0x43, 0x2a, 0x35, 0x8e, 0x2a, 0xcb, 0x4a, 0x07, 0x6d, 0x19, 0x4e, 0xca, 0xe4, 0x38, + 0xb6, 0xae, 0xa4, 0x84, 0x04, 0x71, 0x66, 0x63, 0x48, 0xb6, 0x4e, 0x0e, 0x79, 0x42, 0x70, 0x2a, + 0x2f, 0x44, 0x3f, 0xb2, 0x9e, 0x37, 0x86, 0x0a, 0x06, 0x29, 0x38, 0x93, 0x67, 0xe2, 0x58, 0x73, + 0x20, 0xc7, 0x68, 0xa0, 0x2f, 0x07, 0xe2, 0x34, 0x32, 0x2e, 0x50, 0x1b, 0x1c, 0x1b, 0x82, 0xf3, + 0x02, 0xa4, 0x30, 0x60, 0x32, 0xd6, 0x7b, 0x18, 0xc8, 0x99, 0x98, 0x28, 0x9b, 0x0a, 0x62, 0xf4, + 0x94, 0x1c, 0x79, 0x72, 0x0b, 0x52, 0xa9, 0xb6, 0x2e, 0xd5, 0x31, 0x44, 0x57, 0xc6, 0x6c, 0x90, + 0x77, 0x68, 0x25, 0xa1, 0xa8, 0xc6, 0x68, 0x42, 0xd2, 0x9c, 0xfc, 0x8d, 0x0e, 0xd5, 0x34, 0x69, + 0xf6, 0x81, 0x50, 0x25, 0xb8, 0x1a, 0x67, 0xf1, 0x36, 0x77, 0xeb, 0xe1, 0x0b, 0x3d, 0xf7, 0xdd, + 0xa7, 0xa7, 0xf6, 0x39, 0x77, 0x5f, 0xc6, 0x40, 0xc5, 0x54, 0x91, 0xb4, 0x69, 0x8a, 0xc5, 0x4d, + 0xd3, 0xfb, 0xf8, 0x7e, 0xfd, 0xb0, 0xfd, 0xfc, 0x7d, 0x39, 0x5c, 0xe5, 0xa7, 0xd1, 
0x0b, 0xff, + 0xff, 0x2e, 0xd7, 0xf9, 0xef, 0xc5, 0xf8, 0xd9, 0xeb, 0x2d, 0x0f, 0x77, 0x0b, 0xf1, 0xee, 0x57, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xba, 0x41, 0x19, 0x54, 0x3e, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/lib/core/error_codes.proto b/executor/proto/tensorflow/core/lib/core/error_codes.proto new file mode 100644 index 0000000000..5ced65a973 --- /dev/null +++ b/executor/proto/tensorflow/core/lib/core/error_codes.proto @@ -0,0 +1,149 @@ +syntax = "proto3"; + +package tensorflow.error; +option cc_enable_arenas = true; +option java_outer_classname = "ErrorCodesProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/lib/core"; + +// The canonical error codes for TensorFlow APIs. +// +// Warnings: +// +// - Do not change any numeric assignments. +// - Changes to this list should only be made if there is a compelling +// need that can't be satisfied in another way. Such changes +// must be approved by at least two OWNERS. +// +// Sometimes multiple error codes may apply. Services should return +// the most specific error code that applies. For example, prefer +// OUT_OF_RANGE over FAILED_PRECONDITION if both codes apply. +// Similarly prefer NOT_FOUND or ALREADY_EXISTS over FAILED_PRECONDITION. +enum Code { + // Not an error; returned on success + OK = 0; + + // The operation was cancelled (typically by the caller). + CANCELLED = 1; + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + UNKNOWN = 2; + + // Client specified an invalid argument. Note that this differs + // from FAILED_PRECONDITION. 
INVALID_ARGUMENT indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + INVALID_ARGUMENT = 3; + + // Deadline expired before operation could complete. For operations + // that change the state of the system, this error may be returned + // even if the operation has completed successfully. For example, a + // successful response from a server could have been delayed long + // enough for the deadline to expire. + DEADLINE_EXCEEDED = 4; + + // Some requested entity (e.g., file or directory) was not found. + // For privacy reasons, this code *may* be returned when the client + // does not have the access right to the entity. + NOT_FOUND = 5; + + // Some entity that we attempted to create (e.g., file or directory) + // already exists. + ALREADY_EXISTS = 6; + + // The caller does not have permission to execute the specified + // operation. PERMISSION_DENIED must not be used for rejections + // caused by exhausting some resource (use RESOURCE_EXHAUSTED + // instead for those errors). PERMISSION_DENIED must not be + // used if the caller can not be identified (use UNAUTHENTICATED + // instead for those errors). + PERMISSION_DENIED = 7; + + // The request does not have valid authentication credentials for the + // operation. + UNAUTHENTICATED = 16; + + // Some resource has been exhausted, perhaps a per-user quota, or + // perhaps the entire file system is out of space. + RESOURCE_EXHAUSTED = 8; + + // Operation was rejected because the system is not in a state + // required for the operation's execution. For example, directory + // to be deleted may be non-empty, an rmdir operation is applied to + // a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE: + // (a) Use UNAVAILABLE if the client can retry just the failing call. 
+ // (b) Use ABORTED if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FAILED_PRECONDITION if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FAILED_PRECONDITION + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FAILED_PRECONDITION if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FAILED_PRECONDITION = 9; + + // The operation was aborted, typically due to a concurrency issue + // like sequencer check failures, transaction aborts, etc. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + ABORTED = 10; + + // Operation tried to iterate past the valid input range. E.g., seeking or + // reading past end of file. + // + // Unlike INVALID_ARGUMENT, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate INVALID_ARGUMENT if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OUT_OF_RANGE if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FAILED_PRECONDITION and + // OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OUT_OF_RANGE error to detect when + // they are done. + OUT_OF_RANGE = 11; + + // Operation is not implemented or not supported/enabled in this service. + UNIMPLEMENTED = 12; + + // Internal errors. Means some invariant expected by the underlying + // system has been broken. 
If you see one of these errors, + // something is very broken. + INTERNAL = 13; + + // The service is currently unavailable. This is a most likely a + // transient condition and may be corrected by retrying with + // a backoff. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + UNAVAILABLE = 14; + + // Unrecoverable data loss or corruption. + DATA_LOSS = 15; + + // An extra enum entry to prevent people from writing code that + // fails to compile when a new code is added. + // + // Nobody should ever reference this enumeration entry. In particular, + // if you write C++ code that switches on this enumeration, add a default: + // case instead of a case that mentions this enumeration entry. + // + // Nobody should rely on the value (currently 20) listed here. It + // may change in the future. + DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_ = 20; +} diff --git a/executor/proto/tensorflow/core/profiler/op_profile.proto b/executor/proto/tensorflow/core/profiler/op_profile.proto new file mode 100644 index 0000000000..0adca5544a --- /dev/null +++ b/executor/proto/tensorflow/core/profiler/op_profile.proto @@ -0,0 +1,76 @@ +syntax = "proto3"; + +package tensorflow.profiler.op_profile; + +// Profile is the top-level data that summarizes a program. +message Profile { + reserved 2; + reserved "by_program_structure"; + reserved 3; + reserved "per_program"; + // Root of a profile broken down by instruction category. + Node by_category = 1; + // Root of a profile broken down by program. + Node by_program = 4; +} + +// An entry in the profile tree. (An instruction, or set of instructions). +message Node { + string name = 1; // Semantics depend on contents. + Metrics metrics = 2; // May be omitted e.g. for fused instructions. + repeated Node children = 3; // Subjected to pruning. + + // Details about what this node represents. 
+ oneof contents { + InstructionCategory category = 4; + XLAInstruction xla = 5; + } + + int32 num_children = 6; // Total number of children before pruning. + // A category of XLA instructions. + // name is a descriptive string, like "data formatting". + message InstructionCategory { + } + // A single XLA instruction. + // name is the unique instruction id, like "%multiply.5". + message XLAInstruction { + string op = 1; // Opcode like %multiply + string expression = 2; // %multiply = [shape]multiply(operand1, operand2) + string provenance = 3; // Typically the TensorFlow operation name. + string category = 4; + // Describes the physical memory layout of the instruction's primary input. + // e.g. for a convolution, this analyzes the image and ignores the kernel. + LayoutAnalysis layout = 5; + message LayoutAnalysis { + // The physical data layout, from most-minor to most-major dimensions. + repeated Dimension dimensions = 1; + message Dimension { + int32 size = 1; // Size of the data in this dimension. + int32 alignment = 2; // Data must be padded to a multiple of alignment. + string semantics = 3; // What the dimension represents, e.g. "spatial". + } + } + } +} + +// Measurements of an operation (or aggregated set of operations). +// Metrics are always "total" rather than "self". +message Metrics { + // Core-time taken by this operation, as a fraction of all operations. + double time = 1; + // Floating point computations performed by this operation, as a fraction of + // peak core FLOPS * program time. This representation has useful properties: + // - it is proportional to the number of floating point operations performed + // - utilization is flops/time + // - wasted potential flops is proportional to time - flops + // - it does not reveal the peak core FLOPS of the hardware + double flops = 2; + + // The memory bandwidth used to load operands, as a fraction of + // thereotical memory bandwidth on the specific hardware. 
+ double memory_bandwidth = 3; + + double raw_time = 11; // Elapsed core-time in picoseconds. + double raw_flops = 12; // Total floating-point operations performed. + double raw_bytes_accessed = 13; // Total bytes accessed (include read/write). +} diff --git a/executor/proto/tensorflow/core/profiler/profile.proto b/executor/proto/tensorflow/core/profiler/profile.proto new file mode 100644 index 0000000000..27aa904c4a --- /dev/null +++ b/executor/proto/tensorflow/core/profiler/profile.proto @@ -0,0 +1,71 @@ +// This proto intends to match format expected by pprof tool. +syntax = "proto3"; + +package tensorflow.tfprof.pprof; + +message Profile { + repeated ValueType sample_type = 1; + repeated Sample sample = 2; + repeated Mapping mapping = 3; + repeated Location location = 4; + repeated Function function = 5; + repeated string string_table = 6; + int64 drop_frames = 7; + int64 keep_frames = 8; + int64 time_nanos = 9; + int64 duration_nanos = 10; + ValueType period_type = 11; + int64 period = 12; + repeated int64 comment = 13; + int64 default_sample_type = 14; +} + +message ValueType { + int64 type = 1; + int64 unit = 2; +} + +message Sample { + repeated uint64 location_id = 1; + repeated int64 value = 2; + repeated Label label = 3; +} + +message Label { + int64 key = 1; + int64 str = 2; + int64 num = 3; +} + +message Mapping { + uint64 id = 1; + uint64 memory_start = 2; + uint64 memory_limit = 3; + uint64 file_offset = 4; + int64 filename = 5; + int64 build_id = 6; + bool has_functions = 7; + bool has_filenames = 8; + bool has_line_numbers = 9; + bool has_inline_frames = 10; +} + +message Location { + uint64 id = 1; + uint64 mapping_id = 2; + uint64 address = 3; + repeated Line line = 4; +} + +message Line { + uint64 function_id = 1; + int64 line = 2; +} + +message Function { + uint64 id = 1; + int64 name = 2; + int64 system_name = 3; + int64 filename = 4; + int64 start_line = 5; +} diff --git a/executor/proto/tensorflow/core/profiler/profiler_analysis.proto 
b/executor/proto/tensorflow/core/profiler/profiler_analysis.proto new file mode 100644 index 0000000000..4be75de8bb --- /dev/null +++ b/executor/proto/tensorflow/core/profiler/profiler_analysis.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; +package tensorflow; + +import "tensorflow/core/profiler/profiler_service.proto"; + +message NewProfileSessionRequest { + ProfileRequest request = 1; + string repository_root = 2; + repeated string hosts = 3; + string session_id = 4; +} + +message NewProfileSessionResponse { + // Auxiliary error_message. + string error_message = 1; + + // Whether all hosts had returned a empty trace. + bool empty_trace = 2; +} + +message EnumProfileSessionsAndToolsRequest { + string repository_root = 1; +} + +message ProfileSessionInfo { + string session_id = 1; + // Which tool data is available for consumption. + repeated string available_tools = 2; +} + +message EnumProfileSessionsAndToolsResponse { + // Auxiliary error_message. + string error_message = 1; + // If success, the returned sessions information are stored here. + repeated ProfileSessionInfo sessions = 2; +} + +message ProfileSessionDataRequest { + string repository_root = 1; + string session_id = 2; + // Which host the data is associated. if empty, data from all hosts are + // aggregated. + string host_name = 5; + // Which tool + string tool_name = 3; + // Tool's specific parameters. e.g. TraceViewer's viewport etc + map parameters = 4; +} + +message ProfileSessionDataResponse { + // Auxiliary error_message. + string error_message = 1; + + // Output format. e.g. "json" or "proto" or "blob" + string output_format = 2; + + // TODO(jiesun): figure out whether to put bytes or oneof tool specific proto. 
+ bytes output = 3; +} +//////////////////////////////////////////////////////////////////////////////// +// ProfileAnalysis service provide entry point for profiling TPU and for +// serving profiled data to Tensorboard through GRPC +//////////////////////////////////////////////////////////////////////////////// +service ProfileAnalysis { + // Starts a profiling session, blocks until it completes. + // TPUProfileAnalysis service delegate this to TPUProfiler service. + // Populate the profiled data in repository, then return status to caller. + rpc NewSession(NewProfileSessionRequest) returns (NewProfileSessionResponse) { + } + // Enumerate existing sessions and return available profile tools. + rpc EnumSessions(EnumProfileSessionsAndToolsRequest) + returns (EnumProfileSessionsAndToolsResponse) { + } + // Retrieve specific tool's data for specific session. + rpc GetSessionToolData(ProfileSessionDataRequest) + returns (ProfileSessionDataResponse) { + } +} diff --git a/executor/proto/tensorflow/core/profiler/profiler_service.proto b/executor/proto/tensorflow/core/profiler/profiler_service.proto new file mode 100644 index 0000000000..be7ff7b5b1 --- /dev/null +++ b/executor/proto/tensorflow/core/profiler/profiler_service.proto @@ -0,0 +1,133 @@ +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/core/framework/graph.proto"; +import "tensorflow/core/profiler/op_profile.proto"; +import "tensorflow/core/protobuf/config.proto"; + +// The ProfilerService service retrieves performance information about +// the programs running on connected devices over a period of time. +service ProfilerService { + // Starts a profiling session, blocks until it completes, and returns data. + rpc Profile(ProfileRequest) returns (ProfileResponse) {} + // Collects profiling data and returns user-friendly metrics. 
+ rpc Monitor(MonitorRequest) returns (MonitorResponse) {} +} + +message ProfileOptions { + // We don't collect the dataset ops by default for better trace-viewer + // scalability. The caller can mannually set this field to include the ops. + bool include_dataset_ops = 1; + + // next-field: 2 +} + +message ToolRequestOptions { + // Required formats for the tool, it should be one of "json", "proto", "raw" + // etc. If not specified (backward compatible), use default format, i.e. most + // tools use json format. + string output_formats = 2; + + // Whether save the result directly to repository or pass it back to caller. + // Default to false for backward compatibilities. + bool save_to_repo = 3; +} + +message ProfileRequest { + // In future, the caller will be able to customize when profiling starts and + // stops. For now, it collects `duration_ms` milliseconds worth of data. + uint64 duration_ms = 1; + + // The maximum number of events to return. By default (value 0), return all + // events. + uint64 max_events = 2; + + // Required profiling tools name such as "input_pipeline_analyzer" etc + repeated string tools = 3; + + // Specifies the requirement for each tools. + map tool_options = 8; + + // Optional profiling options that control how a TF session will be profiled. + ProfileOptions opts = 4; + + // The place where we will dump profile data. We will normally use + // MODEL_DIR/plugin/profile/ as our repository root. + string repository_root = 5; + + // The user provided profile session identifier. + string session_id = 6; + + // The hostname of system where the profile should happen. + // We use it as identifier in part of our output filename. + string host_name = 7; + + // In future, the caller will indicate which TF session is being profiled, and + // only data relating to that program will be returned. For now, we assume + // all activity during the profiling period is relevant. 
+ // next-field: 9 +} + +message ProfileToolData { + // The file name which this data is associated (e.g. "input_pipeline.json", + // "cluster_xxx.memory_viewer.json"). + string name = 1; + + // The data payload (likely json) for the specific tool. + bytes data = 2; +} + +message ProfileResponse { + reserved 1; // was uint64 placeholder for returning something meaningful. + // Graphs of programs executed on devices during the profiling period. + repeated GraphDef computation_graph = 2; + + // Performance profile that can be used to annotate HLO operations in the + // computation graph. + RunMetadata hlo_metadata = 5; + + // Encoded Trace proto message that contains metadata about the trace captured + // during the profiling period. Describes the devices and resources that + // 'trace_events' refers to. + bytes encoded_trace = 3; + + // Assembles a hierarchical performance profile based on HLOs in trace events. + // If the trace covers multiple programs, the longest-running one is analyzed. + // See op_profile.proto for the detailed semantics of the returned profile. + profiler.op_profile.Profile op_profile = 4; + + // Data payload for each required tools. + repeated ProfileToolData tool_data = 6; + + // When we write profiling data directly to repository directory, we need a + // way to figure out whether the captured trace is empty (due to idle TPU). + bool empty_trace = 7; + + // next-field: 8 +} + +message MonitorRequest { + // Duration for which to profile between each update. + uint64 duration_ms = 1; + + // Indicates the level at which we want to monitor. Currently, two levels are + // supported: + // Level 1: An ultra lightweight mode that captures only some utilization + // metrics. + // Level 2: More verbose than level 1. Collects utilization metrics, device + // information, step time information, etc. Do not use this option if the TPU + // host is being very heavily used. + int32 monitoring_level = 2; + // True to display timestamp in monitoring result. 
+ bool timestamp = 3; + + // next-field: 4 +} + +message MonitorResponse { + // Properly formatted string data that can be directly returned back to user. + string data = 1; + + // next-field: 2 +} diff --git a/executor/proto/tensorflow/core/profiler/tfprof_log.proto b/executor/proto/tensorflow/core/profiler/tfprof_log.proto new file mode 100644 index 0000000000..90b9e293ec --- /dev/null +++ b/executor/proto/tensorflow/core/profiler/tfprof_log.proto @@ -0,0 +1,157 @@ +syntax = "proto3"; + +package tensorflow.tfprof; + +import "tensorflow/core/framework/attr_value.proto"; +import "tensorflow/core/framework/step_stats.proto"; + +// It specifies the Python callstack that creates an op. +message CodeDef { + repeated Trace traces = 1; + message Trace { + string file = 1 [deprecated = true]; // deprecated by file_id. + int64 file_id = 6; + + int32 lineno = 2; + + string function = 3 [deprecated = true]; // deprecated by function_id. + int64 function_id = 7; + + string line = 4 [deprecated = true]; // deprecated line_id. + int64 line_id = 8; + + int32 func_start_line = 5; + } +} + +message OpLogEntry { + // op name. + string name = 1; + // float_ops is filled by tfprof Python API when called. It requires the + // op has RegisterStatistics defined. Currently, Conv2D, MatMul, etc, are + // implemented. + int64 float_ops = 2; + // User can define extra op type information for an op. This allows the user + // to select a group of ops precisely using op_type as a key. + repeated string types = 3; + // Used to support tfprof "code" view. + CodeDef code_def = 4; +} + +message OpLogProto { + repeated OpLogEntry log_entries = 1; + + // Maps from id of CodeDef file,function,line to its string + // In the future can also map other id of other fields to string. + map id_to_string = 2; +} + +// A proto representation of the profiler's profile. +// It allows serialization, shipping around and deserialization of the profiles. 
+// +// Please don't depend on the internals of the profile proto. +message ProfileProto { + map nodes = 1; + // Whether or not has code traces. + bool has_trace = 2; + // Whether or not the TF device tracer fails to return accelerator + // information (which could lead to 0 accelerator execution time). + bool miss_accelerator_stream = 5; + // Traced steps. + repeated int64 steps = 3; + + // Maps from id of CodeDef file,function,line to its string + // In the future can also map other id of other fields to string. + map id_to_string = 4; +} + +message ProfileNode { + // graph node name. + string name = 1; + // graph operation type. + string op = 9; + // A unique id for the node. + int64 id = 13; + + map inputs = 2; + map input_shapes = 16; + map outputs = 3; + map output_shapes = 15; + // A map from source node id to its output index to current node. + map src_output_index = 14; + + repeated int64 shape = 4; + repeated string op_types = 5; + string canonical_device = 6; + string host_device = 7; + + int64 float_ops = 8; + + CodeDef trace = 10; + map attrs = 11; + + map execs = 12; +} + +message ExecProfile { + // Can be larger than 1 if run multiple times in loop. + int64 run_count = 1; + // The earliest/latest time including scheduling and execution. + int64 all_start_micros = 2; + int64 latest_end_micros = 3; + + // device -> vector of {op_start_micros, op_exec_micros} pairs. + // accelerator_execs: gpu:id/stream:all -> {op_start_micros, op_exec_micros} + // For accelerator, vector size can be larger than 1, multiple kernel fires + // or in tf.while_loop. + map accelerator_execs = 4; + // cpu_execs: cpu/gpu:id -> {op_start_micros, op_exec_micros} + // For cpu, vector size can be larger than 1 if in tf.while_loop. + map cpu_execs = 5; + + // Each entry to memory information of a scheduling of the node. + // Normally, there will be multiple entries in while_loop. 
+ repeated ExecMemory memory_execs = 7; + // The allocation and deallocation times and sizes throughout execution. + repeated AllocationRecord allocations = 11; + // The devices related to this execution. + repeated string devices = 6; +} + +message ExecTime { + repeated Tuple times = 1; +} + +message ExecMemory { + // This is the timestamp when the memory information was tracked. + int64 memory_micros = 1; + // NOTE: Please don't depend on the following 4 fields yet. Due to + // TensorFlow internal tracing issues, the numbers can be quite wrong. + // TODO(xpan): Fix the TensorFlow internal tracing. + int64 host_temp_bytes = 2; + int64 host_persistent_bytes = 3; + int64 accelerator_temp_bytes = 4; + int64 accelerator_persistent_bytes = 5; + + // Total bytes requested by the op. + int64 requested_bytes = 6; + // Total bytes requested by the op and released before op end. + int64 peak_bytes = 7; + // Total bytes requested by the op and not released after op end. + int64 residual_bytes = 8; + // Total bytes output by the op (not necessarily requested by the op). + int64 output_bytes = 9; + // The total number of bytes currently allocated by the allocator if >0. + int64 allocator_bytes_in_use = 10; + // The memory of each output of the operation. + map output_memory = 11; +} + +message Tuple { + repeated int64 int64_values = 1; +} + +message Memory { + int64 bytes = 1; + uint64 ptr = 2; +} diff --git a/executor/proto/tensorflow/core/profiler/tfprof_options.proto b/executor/proto/tensorflow/core/profiler/tfprof_options.proto new file mode 100644 index 0000000000..b53288d351 --- /dev/null +++ b/executor/proto/tensorflow/core/profiler/tfprof_options.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package tensorflow.tfprof; + +// Refers to tfprof_options.h/cc for documentation. +// Only used to pass tfprof options from Python to C++. 
+message OptionsProto { + int64 max_depth = 1; + int64 min_bytes = 2; + int64 min_peak_bytes = 19; + int64 min_residual_bytes = 20; + int64 min_output_bytes = 21; + int64 min_micros = 3; + int64 min_accelerator_micros = 22; + int64 min_cpu_micros = 23; + int64 min_params = 4; + int64 min_float_ops = 5; + int64 min_occurrence = 17; + int64 step = 18; + + string order_by = 7; + repeated string account_type_regexes = 8; + repeated string start_name_regexes = 9; + repeated string trim_name_regexes = 10; + repeated string show_name_regexes = 11; + repeated string hide_name_regexes = 12; + bool account_displayed_op_only = 13; + repeated string select = 14; + string output = 15; + string dump_to_file = 16; +} + +message AdvisorOptionsProto { + // checker name -> a dict of key-value options. + map checkers = 1; + message CheckerOption { + map options = 1; + } +} diff --git a/executor/proto/tensorflow/core/profiler/tfprof_output.proto b/executor/proto/tensorflow/core/profiler/tfprof_output.proto new file mode 100644 index 0000000000..4a6068da40 --- /dev/null +++ b/executor/proto/tensorflow/core/profiler/tfprof_output.proto @@ -0,0 +1,134 @@ +syntax = "proto3"; + +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +package tensorflow.tfprof; + +message TFProfTensorProto { + DataType dtype = 1; + // Flatten tensor in row-major. + // Only one of the following array is set. + repeated double value_double = 2; + repeated int64 value_int64 = 3; + repeated string value_str = 4; +} + +// A node in TensorFlow graph. Used by scope/graph view. +message GraphNodeProto { + // op name. + string name = 1; + // tensor value restored from checkpoint. + TFProfTensorProto tensor_value = 15; + // op execution time. + // A node can be defined once but run multiple times in tf.while_loop. + // the times sum up all different runs. 
+ int64 run_count = 21; + int64 exec_micros = 2; + int64 accelerator_exec_micros = 17; + int64 cpu_exec_micros = 18; + + // Total bytes requested by the op. + int64 requested_bytes = 3; + // Max bytes allocated and being used by the op at a point. + int64 peak_bytes = 24; + // Total bytes requested by the op and not released before end. + int64 residual_bytes = 25; + // Total bytes output by the op (not necessarily allocated by the op). + int64 output_bytes = 26; + + // Number of parameters if available. + int64 parameters = 4; + // Number of float operations. + int64 float_ops = 13; + // Device the op is assigned to. + // Since an op can fire multiple kernel calls, there can be multiple devices. + repeated string devices = 10; + + // The following are the aggregated stats from all *accounted* children and + // the node itself. The actual children depend on the data structure used. + // In graph view, children are inputs recursively. + // In scope view, children are nodes under the name scope. + int64 total_definition_count = 23; + int64 total_run_count = 22; + int64 total_exec_micros = 6; + int64 total_accelerator_exec_micros = 19; + int64 total_cpu_exec_micros = 20; + + int64 total_requested_bytes = 7; + int64 total_peak_bytes = 27; + int64 total_residual_bytes = 28; + int64 total_output_bytes = 29; + + int64 total_parameters = 8; + int64 total_float_ops = 14; + + // shape information, if available. + // TODO(xpan): Why is this repeated? + repeated TensorShapeProto shapes = 11; + + map input_shapes = 16; + + // Descendants of the graph. The actual descendants depend on the data + // structure used (scope, graph). + repeated GraphNodeProto children = 12; +} + +// A node that groups multiple GraphNodeProto. +// Depending on the 'view', the semantics of the TFmultiGraphNodeProto +// is different: +// code view: A node groups all TensorFlow graph nodes created by the +// Python code. 
+// op view: A node groups all TensorFlow graph nodes that are of type +// of the op (e.g. MatMul, Conv2D). +message MultiGraphNodeProto { + // Name of the node. + string name = 1; + + // code execution time. + int64 exec_micros = 2; + int64 accelerator_exec_micros = 12; + int64 cpu_exec_micros = 13; + + // Total requested bytes by the code. + int64 requested_bytes = 3; + // Max bytes allocated and being used by the op at a point. + int64 peak_bytes = 16; + // Total bytes requested by the op and not released before end. + int64 residual_bytes = 17; + // Total bytes output by the op (not necessarily allocated by the op). + int64 output_bytes = 18; + + // Number of parameters if available. + int64 parameters = 4; + // Number of float operations. + int64 float_ops = 5; + + // The following are the aggregated stats from descendants. + // The actual descendants depend on the data structure used. + int64 total_exec_micros = 6; + int64 total_accelerator_exec_micros = 14; + int64 total_cpu_exec_micros = 15; + + int64 total_requested_bytes = 7; + int64 total_peak_bytes = 19; + int64 total_residual_bytes = 20; + int64 total_output_bytes = 21; + + int64 total_parameters = 8; + int64 total_float_ops = 9; + + // TensorFlow graph nodes contained by the MultiGraphNodeProto. + repeated GraphNodeProto graph_nodes = 10; + // Descendants of the node. The actual descendants depend on the data + // structure used. + repeated MultiGraphNodeProto children = 11; +} + +message AdviceProto { + // checker name -> a list of reports from the checker. + map checkers = 1; + message Checker { + repeated string reports = 2; + } +} diff --git a/executor/proto/tensorflow/core/protobuf/autotuning.pb.go b/executor/proto/tensorflow/core/protobuf/autotuning.pb.go new file mode 100644 index 0000000000..719cd60a3e --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/autotuning.pb.go @@ -0,0 +1,597 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow/core/protobuf/autotuning.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + duration "github.com/golang/protobuf/ptypes/duration" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type AutotuneResult_FailureKind int32 + +const ( + AutotuneResult_UNKNOWN AutotuneResult_FailureKind = 0 + AutotuneResult_REDZONE_MODIFIED AutotuneResult_FailureKind = 1 + AutotuneResult_WRONG_RESULT AutotuneResult_FailureKind = 2 +) + +var AutotuneResult_FailureKind_name = map[int32]string{ + 0: "UNKNOWN", + 1: "REDZONE_MODIFIED", + 2: "WRONG_RESULT", +} + +var AutotuneResult_FailureKind_value = map[string]int32{ + "UNKNOWN": 0, + "REDZONE_MODIFIED": 1, + "WRONG_RESULT": 2, +} + +func (x AutotuneResult_FailureKind) String() string { + return proto.EnumName(AutotuneResult_FailureKind_name, int32(x)) +} + +func (AutotuneResult_FailureKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f61248520e180396, []int{2, 0} +} + +type CudnnVersion struct { + Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CudnnVersion) Reset() { *m = CudnnVersion{} } +func (m *CudnnVersion) String() string { return proto.CompactTextString(m) } +func 
(*CudnnVersion) ProtoMessage() {} +func (*CudnnVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_f61248520e180396, []int{0} +} + +func (m *CudnnVersion) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CudnnVersion.Unmarshal(m, b) +} +func (m *CudnnVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CudnnVersion.Marshal(b, m, deterministic) +} +func (m *CudnnVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CudnnVersion.Merge(m, src) +} +func (m *CudnnVersion) XXX_Size() int { + return xxx_messageInfo_CudnnVersion.Size(m) +} +func (m *CudnnVersion) XXX_DiscardUnknown() { + xxx_messageInfo_CudnnVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_CudnnVersion proto.InternalMessageInfo + +func (m *CudnnVersion) GetMajor() int32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *CudnnVersion) GetMinor() int32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *CudnnVersion) GetPatch() int32 { + if m != nil { + return m.Patch + } + return 0 +} + +type ComputeCapability struct { + Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ComputeCapability) Reset() { *m = ComputeCapability{} } +func (m *ComputeCapability) String() string { return proto.CompactTextString(m) } +func (*ComputeCapability) ProtoMessage() {} +func (*ComputeCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_f61248520e180396, []int{1} +} + +func (m *ComputeCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ComputeCapability.Unmarshal(m, b) +} +func (m *ComputeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ComputeCapability.Marshal(b, m, deterministic) +} +func (m *ComputeCapability) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_ComputeCapability.Merge(m, src) +} +func (m *ComputeCapability) XXX_Size() int { + return xxx_messageInfo_ComputeCapability.Size(m) +} +func (m *ComputeCapability) XXX_DiscardUnknown() { + xxx_messageInfo_ComputeCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_ComputeCapability proto.InternalMessageInfo + +func (m *ComputeCapability) GetMajor() int32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *ComputeCapability) GetMinor() int32 { + if m != nil { + return m.Minor + } + return 0 +} + +type AutotuneResult struct { + ScratchBytes int64 `protobuf:"varint,8,opt,name=scratch_bytes,json=scratchBytes,proto3" json:"scratch_bytes,omitempty"` + RunTime *duration.Duration `protobuf:"bytes,9,opt,name=run_time,json=runTime,proto3" json:"run_time,omitempty"` + Failure *AutotuneResult_FailureResult `protobuf:"bytes,7,opt,name=failure,proto3" json:"failure,omitempty"` + // Types that are valid to be assigned to Key: + // *AutotuneResult_Conv + // *AutotuneResult_Gemm + Key isAutotuneResult_Key `protobuf_oneof:"key"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AutotuneResult) Reset() { *m = AutotuneResult{} } +func (m *AutotuneResult) String() string { return proto.CompactTextString(m) } +func (*AutotuneResult) ProtoMessage() {} +func (*AutotuneResult) Descriptor() ([]byte, []int) { + return fileDescriptor_f61248520e180396, []int{2} +} + +func (m *AutotuneResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AutotuneResult.Unmarshal(m, b) +} +func (m *AutotuneResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AutotuneResult.Marshal(b, m, deterministic) +} +func (m *AutotuneResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_AutotuneResult.Merge(m, src) +} +func (m *AutotuneResult) XXX_Size() int { + return xxx_messageInfo_AutotuneResult.Size(m) +} +func (m 
*AutotuneResult) XXX_DiscardUnknown() { + xxx_messageInfo_AutotuneResult.DiscardUnknown(m) +} + +var xxx_messageInfo_AutotuneResult proto.InternalMessageInfo + +func (m *AutotuneResult) GetScratchBytes() int64 { + if m != nil { + return m.ScratchBytes + } + return 0 +} + +func (m *AutotuneResult) GetRunTime() *duration.Duration { + if m != nil { + return m.RunTime + } + return nil +} + +func (m *AutotuneResult) GetFailure() *AutotuneResult_FailureResult { + if m != nil { + return m.Failure + } + return nil +} + +type isAutotuneResult_Key interface { + isAutotuneResult_Key() +} + +type AutotuneResult_Conv struct { + Conv *AutotuneResult_ConvKey `protobuf:"bytes,5,opt,name=conv,proto3,oneof"` +} + +type AutotuneResult_Gemm struct { + Gemm *AutotuneResult_GemmKey `protobuf:"bytes,6,opt,name=gemm,proto3,oneof"` +} + +func (*AutotuneResult_Conv) isAutotuneResult_Key() {} + +func (*AutotuneResult_Gemm) isAutotuneResult_Key() {} + +func (m *AutotuneResult) GetKey() isAutotuneResult_Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *AutotuneResult) GetConv() *AutotuneResult_ConvKey { + if x, ok := m.GetKey().(*AutotuneResult_Conv); ok { + return x.Conv + } + return nil +} + +func (m *AutotuneResult) GetGemm() *AutotuneResult_GemmKey { + if x, ok := m.GetKey().(*AutotuneResult_Gemm); ok { + return x.Gemm + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AutotuneResult) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AutotuneResult_Conv)(nil), + (*AutotuneResult_Gemm)(nil), + } +} + +type AutotuneResult_FailureResult struct { + Kind AutotuneResult_FailureKind `protobuf:"varint,1,opt,name=kind,proto3,enum=tensorflow.AutotuneResult_FailureKind" json:"kind,omitempty"` + Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` + // For failure_kind == WRONG_RESULT, this field indicates the reference + // configuration that we compared against. 
+ // + // Note that the reference algorithm isn't always correct. However, + // empirically it's more correct, as it's "algo 0", less fancy than the + // compared one. + // + // Types that are valid to be assigned to Key: + // *AutotuneResult_FailureResult_ReferenceConv + // *AutotuneResult_FailureResult_ReferenceGemm + Key isAutotuneResult_FailureResult_Key `protobuf_oneof:"key"` + BufferAddress int64 `protobuf:"varint,13,opt,name=buffer_address,json=bufferAddress,proto3" json:"buffer_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AutotuneResult_FailureResult) Reset() { *m = AutotuneResult_FailureResult{} } +func (m *AutotuneResult_FailureResult) String() string { return proto.CompactTextString(m) } +func (*AutotuneResult_FailureResult) ProtoMessage() {} +func (*AutotuneResult_FailureResult) Descriptor() ([]byte, []int) { + return fileDescriptor_f61248520e180396, []int{2, 0} +} + +func (m *AutotuneResult_FailureResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AutotuneResult_FailureResult.Unmarshal(m, b) +} +func (m *AutotuneResult_FailureResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AutotuneResult_FailureResult.Marshal(b, m, deterministic) +} +func (m *AutotuneResult_FailureResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_AutotuneResult_FailureResult.Merge(m, src) +} +func (m *AutotuneResult_FailureResult) XXX_Size() int { + return xxx_messageInfo_AutotuneResult_FailureResult.Size(m) +} +func (m *AutotuneResult_FailureResult) XXX_DiscardUnknown() { + xxx_messageInfo_AutotuneResult_FailureResult.DiscardUnknown(m) +} + +var xxx_messageInfo_AutotuneResult_FailureResult proto.InternalMessageInfo + +func (m *AutotuneResult_FailureResult) GetKind() AutotuneResult_FailureKind { + if m != nil { + return m.Kind + } + return AutotuneResult_UNKNOWN +} + +func (m *AutotuneResult_FailureResult) GetMsg() 
string { + if m != nil { + return m.Msg + } + return "" +} + +type isAutotuneResult_FailureResult_Key interface { + isAutotuneResult_FailureResult_Key() +} + +type AutotuneResult_FailureResult_ReferenceConv struct { + ReferenceConv *AutotuneResult_ConvKey `protobuf:"bytes,11,opt,name=reference_conv,json=referenceConv,proto3,oneof"` +} + +type AutotuneResult_FailureResult_ReferenceGemm struct { + ReferenceGemm *AutotuneResult_GemmKey `protobuf:"bytes,12,opt,name=reference_gemm,json=referenceGemm,proto3,oneof"` +} + +func (*AutotuneResult_FailureResult_ReferenceConv) isAutotuneResult_FailureResult_Key() {} + +func (*AutotuneResult_FailureResult_ReferenceGemm) isAutotuneResult_FailureResult_Key() {} + +func (m *AutotuneResult_FailureResult) GetKey() isAutotuneResult_FailureResult_Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *AutotuneResult_FailureResult) GetReferenceConv() *AutotuneResult_ConvKey { + if x, ok := m.GetKey().(*AutotuneResult_FailureResult_ReferenceConv); ok { + return x.ReferenceConv + } + return nil +} + +func (m *AutotuneResult_FailureResult) GetReferenceGemm() *AutotuneResult_GemmKey { + if x, ok := m.GetKey().(*AutotuneResult_FailureResult_ReferenceGemm); ok { + return x.ReferenceGemm + } + return nil +} + +func (m *AutotuneResult_FailureResult) GetBufferAddress() int64 { + if m != nil { + return m.BufferAddress + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*AutotuneResult_FailureResult) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AutotuneResult_FailureResult_ReferenceConv)(nil), + (*AutotuneResult_FailureResult_ReferenceGemm)(nil), + } +} + +type AutotuneResult_ConvKey struct { + Algorithm int64 `protobuf:"varint,1,opt,name=algorithm,proto3" json:"algorithm,omitempty"` + TensorOpsEnabled bool `protobuf:"varint,2,opt,name=tensor_ops_enabled,json=tensorOpsEnabled,proto3" json:"tensor_ops_enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AutotuneResult_ConvKey) Reset() { *m = AutotuneResult_ConvKey{} } +func (m *AutotuneResult_ConvKey) String() string { return proto.CompactTextString(m) } +func (*AutotuneResult_ConvKey) ProtoMessage() {} +func (*AutotuneResult_ConvKey) Descriptor() ([]byte, []int) { + return fileDescriptor_f61248520e180396, []int{2, 1} +} + +func (m *AutotuneResult_ConvKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AutotuneResult_ConvKey.Unmarshal(m, b) +} +func (m *AutotuneResult_ConvKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AutotuneResult_ConvKey.Marshal(b, m, deterministic) +} +func (m *AutotuneResult_ConvKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_AutotuneResult_ConvKey.Merge(m, src) +} +func (m *AutotuneResult_ConvKey) XXX_Size() int { + return xxx_messageInfo_AutotuneResult_ConvKey.Size(m) +} +func (m *AutotuneResult_ConvKey) XXX_DiscardUnknown() { + xxx_messageInfo_AutotuneResult_ConvKey.DiscardUnknown(m) +} + +var xxx_messageInfo_AutotuneResult_ConvKey proto.InternalMessageInfo + +func (m *AutotuneResult_ConvKey) GetAlgorithm() int64 { + if m != nil { + return m.Algorithm + } + return 0 +} + +func (m *AutotuneResult_ConvKey) GetTensorOpsEnabled() bool { + if m != nil { + return m.TensorOpsEnabled + } + return false +} + +type AutotuneResult_GemmKey struct { + Algorithm int64 
`protobuf:"varint,1,opt,name=algorithm,proto3" json:"algorithm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AutotuneResult_GemmKey) Reset() { *m = AutotuneResult_GemmKey{} } +func (m *AutotuneResult_GemmKey) String() string { return proto.CompactTextString(m) } +func (*AutotuneResult_GemmKey) ProtoMessage() {} +func (*AutotuneResult_GemmKey) Descriptor() ([]byte, []int) { + return fileDescriptor_f61248520e180396, []int{2, 2} +} + +func (m *AutotuneResult_GemmKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AutotuneResult_GemmKey.Unmarshal(m, b) +} +func (m *AutotuneResult_GemmKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AutotuneResult_GemmKey.Marshal(b, m, deterministic) +} +func (m *AutotuneResult_GemmKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_AutotuneResult_GemmKey.Merge(m, src) +} +func (m *AutotuneResult_GemmKey) XXX_Size() int { + return xxx_messageInfo_AutotuneResult_GemmKey.Size(m) +} +func (m *AutotuneResult_GemmKey) XXX_DiscardUnknown() { + xxx_messageInfo_AutotuneResult_GemmKey.DiscardUnknown(m) +} + +var xxx_messageInfo_AutotuneResult_GemmKey proto.InternalMessageInfo + +func (m *AutotuneResult_GemmKey) GetAlgorithm() int64 { + if m != nil { + return m.Algorithm + } + return 0 +} + +type AutotuningLog struct { + Instr *any.Any `protobuf:"bytes,1,opt,name=instr,proto3" json:"instr,omitempty"` + // Records all auto-tuning results per algorithm. + Results []*AutotuneResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` + CudnnVersion *CudnnVersion `protobuf:"bytes,3,opt,name=cudnn_version,json=cudnnVersion,proto3" json:"cudnn_version,omitempty"` + ComputeCapability *ComputeCapability `protobuf:"bytes,4,opt,name=compute_capability,json=computeCapability,proto3" json:"compute_capability,omitempty"` + // stream_executor::DeviceDescription::pci_bus_id. 
+ DevicePciBusId string `protobuf:"bytes,5,opt,name=device_pci_bus_id,json=devicePciBusId,proto3" json:"device_pci_bus_id,omitempty"` + BlasVersion string `protobuf:"bytes,6,opt,name=blas_version,json=blasVersion,proto3" json:"blas_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AutotuningLog) Reset() { *m = AutotuningLog{} } +func (m *AutotuningLog) String() string { return proto.CompactTextString(m) } +func (*AutotuningLog) ProtoMessage() {} +func (*AutotuningLog) Descriptor() ([]byte, []int) { + return fileDescriptor_f61248520e180396, []int{3} +} + +func (m *AutotuningLog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AutotuningLog.Unmarshal(m, b) +} +func (m *AutotuningLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AutotuningLog.Marshal(b, m, deterministic) +} +func (m *AutotuningLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_AutotuningLog.Merge(m, src) +} +func (m *AutotuningLog) XXX_Size() int { + return xxx_messageInfo_AutotuningLog.Size(m) +} +func (m *AutotuningLog) XXX_DiscardUnknown() { + xxx_messageInfo_AutotuningLog.DiscardUnknown(m) +} + +var xxx_messageInfo_AutotuningLog proto.InternalMessageInfo + +func (m *AutotuningLog) GetInstr() *any.Any { + if m != nil { + return m.Instr + } + return nil +} + +func (m *AutotuningLog) GetResults() []*AutotuneResult { + if m != nil { + return m.Results + } + return nil +} + +func (m *AutotuningLog) GetCudnnVersion() *CudnnVersion { + if m != nil { + return m.CudnnVersion + } + return nil +} + +func (m *AutotuningLog) GetComputeCapability() *ComputeCapability { + if m != nil { + return m.ComputeCapability + } + return nil +} + +func (m *AutotuningLog) GetDevicePciBusId() string { + if m != nil { + return m.DevicePciBusId + } + return "" +} + +func (m *AutotuningLog) GetBlasVersion() string { + if m != nil { + return m.BlasVersion + } + return "" +} + +func 
init() { + proto.RegisterEnum("tensorflow.AutotuneResult_FailureKind", AutotuneResult_FailureKind_name, AutotuneResult_FailureKind_value) + proto.RegisterType((*CudnnVersion)(nil), "tensorflow.CudnnVersion") + proto.RegisterType((*ComputeCapability)(nil), "tensorflow.ComputeCapability") + proto.RegisterType((*AutotuneResult)(nil), "tensorflow.AutotuneResult") + proto.RegisterType((*AutotuneResult_FailureResult)(nil), "tensorflow.AutotuneResult.FailureResult") + proto.RegisterType((*AutotuneResult_ConvKey)(nil), "tensorflow.AutotuneResult.ConvKey") + proto.RegisterType((*AutotuneResult_GemmKey)(nil), "tensorflow.AutotuneResult.GemmKey") + proto.RegisterType((*AutotuningLog)(nil), "tensorflow.AutotuningLog") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/autotuning.proto", fileDescriptor_f61248520e180396) +} + +var fileDescriptor_f61248520e180396 = []byte{ + // 678 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdb, 0x6e, 0x1a, 0x3d, + 0x18, 0x0c, 0x10, 0x20, 0x7c, 0x0b, 0x88, 0x58, 0xb9, 0xd8, 0xa0, 0xff, 0xaf, 0x28, 0x55, 0x5b, + 0x52, 0x55, 0x44, 0xa2, 0xb9, 0xa8, 0x2a, 0x55, 0x15, 0xa7, 0xa4, 0x11, 0x29, 0x44, 0x6e, 0xd2, + 0x48, 0xbd, 0x59, 0xed, 0xc1, 0x10, 0x37, 0xbb, 0xf6, 0xca, 0xde, 0xa5, 0xe2, 0x45, 0xfa, 0x06, + 0x7d, 0x9b, 0x3e, 0x54, 0xb5, 0xf6, 0x86, 0x43, 0xa2, 0x26, 0xca, 0x1d, 0x1e, 0x7f, 0x33, 0xfa, + 0x66, 0x58, 0x0f, 0x1c, 0x44, 0x84, 0x49, 0x2e, 0xa6, 0x3e, 0xff, 0x79, 0xe8, 0x72, 0x41, 0x0e, + 0x43, 0xc1, 0x23, 0xee, 0xc4, 0xd3, 0x43, 0x3b, 0x8e, 0x78, 0x14, 0x33, 0xca, 0x66, 0x6d, 0x85, + 0x21, 0x58, 0x8d, 0xd6, 0xf7, 0x67, 0x9c, 0xcf, 0xfc, 0xf5, 0x69, 0xb6, 0xd0, 0x63, 0xf5, 0x67, + 0x77, 0xaf, 0xbc, 0x58, 0xd8, 0x11, 0xe5, 0x4c, 0xdf, 0x37, 0xcf, 0xa1, 0xdc, 0x8f, 0x3d, 0xc6, + 0xbe, 0x11, 0x21, 0x29, 0x67, 0x68, 0x0f, 0xf2, 0x81, 0xfd, 0x83, 0x0b, 0x33, 0xd3, 0xc8, 0xb4, + 0xf2, 0x58, 0x1f, 0x14, 0x4a, 0x19, 0x17, 0x66, 0x36, 0x45, 0x93, 0x43, 0x82, 
0x86, 0x76, 0xe4, + 0x5e, 0x9b, 0x39, 0x8d, 0xaa, 0x43, 0xf3, 0x13, 0xec, 0xf6, 0x79, 0x10, 0xc6, 0x11, 0xe9, 0xdb, + 0xa1, 0xed, 0x50, 0x9f, 0x46, 0x8b, 0xa7, 0xc8, 0x36, 0x7f, 0x15, 0xa0, 0xda, 0xd5, 0x76, 0x09, + 0x26, 0x32, 0xf6, 0x23, 0xf4, 0x02, 0x2a, 0xd2, 0x15, 0x89, 0xbc, 0xe5, 0x2c, 0x22, 0x22, 0xcd, + 0x9d, 0x46, 0xa6, 0x95, 0xc3, 0xe5, 0x14, 0xec, 0x25, 0x18, 0x3a, 0x82, 0x1d, 0x11, 0x33, 0x2b, + 0xa2, 0x01, 0x31, 0x4b, 0x8d, 0x4c, 0xcb, 0xe8, 0xec, 0xb7, 0xb5, 0xfb, 0xf6, 0xad, 0xfb, 0xf6, + 0x20, 0x75, 0x8f, 0x8b, 0x22, 0x66, 0x17, 0x34, 0x20, 0xa8, 0x07, 0xc5, 0xa9, 0x4d, 0xfd, 0x58, + 0x10, 0xb3, 0xa8, 0x48, 0xad, 0xf6, 0x2a, 0xd9, 0xf6, 0xe6, 0x1e, 0xed, 0x63, 0x3d, 0xa9, 0x4f, + 0xf8, 0x96, 0x88, 0xde, 0xc3, 0xb6, 0xcb, 0xd9, 0xdc, 0xcc, 0x2b, 0x81, 0xe6, 0x03, 0x02, 0x7d, + 0xce, 0xe6, 0x23, 0xb2, 0xf8, 0xbc, 0x85, 0x15, 0x23, 0x61, 0xce, 0x48, 0x10, 0x98, 0x85, 0x47, + 0x99, 0x27, 0x24, 0x08, 0x52, 0x66, 0xc2, 0xa8, 0xff, 0xce, 0x42, 0x65, 0x63, 0x1d, 0xf4, 0x01, + 0xb6, 0x6f, 0x28, 0xf3, 0x54, 0xc4, 0xd5, 0xce, 0xab, 0xc7, 0x6d, 0x8c, 0x28, 0xf3, 0xb0, 0xe2, + 0xa0, 0x1a, 0xe4, 0x02, 0x39, 0x53, 0xff, 0x43, 0x09, 0x27, 0x3f, 0xd1, 0x08, 0xaa, 0x82, 0x4c, + 0x89, 0x20, 0xcc, 0x25, 0x96, 0x72, 0x67, 0x3c, 0xc1, 0x5d, 0x65, 0xc9, 0x4d, 0xb0, 0x4d, 0x31, + 0x65, 0xb8, 0xfc, 0x04, 0xc3, 0x2b, 0xb1, 0x04, 0x43, 0x2f, 0xa1, 0xea, 0xc4, 0xd3, 0x29, 0x11, + 0x96, 0xed, 0x79, 0x82, 0x48, 0x69, 0x56, 0xd4, 0xd7, 0x50, 0xd1, 0x68, 0x57, 0x83, 0xbd, 0x3c, + 0xe4, 0x6e, 0xc8, 0xa2, 0x7e, 0x09, 0xc5, 0x74, 0x2d, 0xf4, 0x1f, 0x94, 0x6c, 0x7f, 0xc6, 0x05, + 0x8d, 0xae, 0x03, 0x95, 0x52, 0x0e, 0xaf, 0x00, 0xf4, 0x16, 0x90, 0x5e, 0xc6, 0xe2, 0xa1, 0xb4, + 0x08, 0xb3, 0x1d, 0x9f, 0x78, 0x2a, 0x91, 0x1d, 0x5c, 0xd3, 0x37, 0x93, 0x50, 0x0e, 0x35, 0x5e, + 0x7f, 0x0d, 0xc5, 0x74, 0xc1, 0x87, 0x65, 0x9b, 0x3d, 0x30, 0xd6, 0xe2, 0x46, 0x06, 0x14, 0x2f, + 0xc7, 0xa3, 0xf1, 0xe4, 0x6a, 0x5c, 0xdb, 0x42, 0x7b, 0x50, 0xc3, 0xc3, 0xc1, 0xf7, 0xc9, 0x78, + 0x68, 0x7d, 0x99, 
0x0c, 0x4e, 0x8f, 0x4f, 0x87, 0x83, 0x5a, 0x06, 0xd5, 0xa0, 0x7c, 0x85, 0x27, + 0xe3, 0x13, 0x0b, 0x0f, 0xbf, 0x5e, 0x9e, 0x5d, 0xd4, 0xb2, 0xa9, 0x95, 0xe6, 0x9f, 0x2c, 0x54, + 0xba, 0xcb, 0x1e, 0x38, 0xe3, 0x33, 0xf4, 0x06, 0xf2, 0x94, 0xc9, 0x48, 0x3f, 0x2b, 0xa3, 0xb3, + 0x77, 0xef, 0x7b, 0xef, 0xb2, 0x05, 0xd6, 0x23, 0xe8, 0x08, 0x8a, 0x42, 0x25, 0x2b, 0xcd, 0x6c, + 0x23, 0xd7, 0x32, 0x3a, 0xf5, 0x7f, 0x87, 0x8f, 0x6f, 0x47, 0xd1, 0x47, 0xa8, 0xb8, 0x49, 0x3f, + 0x58, 0x73, 0x5d, 0x10, 0xea, 0xad, 0x1b, 0x1d, 0x73, 0x9d, 0xbb, 0x5e, 0x20, 0xb8, 0xec, 0xae, + 0xd7, 0xc9, 0x19, 0x20, 0x57, 0x97, 0x81, 0xe5, 0x2e, 0xdb, 0xc0, 0xdc, 0x56, 0x1a, 0xff, 0x6f, + 0x68, 0xdc, 0xad, 0x0c, 0xbc, 0xeb, 0xde, 0x6b, 0x91, 0x03, 0xd8, 0xf5, 0xc8, 0x9c, 0xba, 0xc4, + 0x0a, 0x5d, 0x6a, 0x39, 0xb1, 0xb4, 0xa8, 0xa7, 0x1e, 0x5d, 0x09, 0x57, 0xf5, 0xc5, 0xb9, 0x4b, + 0x7b, 0xb1, 0x3c, 0xf5, 0xd0, 0x73, 0x28, 0x3b, 0xbe, 0x2d, 0x97, 0x6b, 0x17, 0xd4, 0x94, 0x91, + 0x60, 0xe9, 0x6e, 0x4e, 0x41, 0xa5, 0xf4, 0xee, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x70, + 0x12, 0xaa, 0x75, 0x05, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/autotuning.proto b/executor/proto/tensorflow/core/protobuf/autotuning.proto new file mode 100644 index 0000000000..f43dbbeac5 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/autotuning.proto @@ -0,0 +1,86 @@ +// This file defines protos that store the results of autotuning various +// operations. +// +// They are in proto format because we want to log them structured. They offer +// tremendous statistical, testing, and debugging value. 
+syntax = "proto3"; + +package tensorflow; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +message CudnnVersion { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; +} + +message ComputeCapability { + int32 major = 1; + int32 minor = 2; +} + +message AutotuneResult { + enum FailureKind { + UNKNOWN = 0; + REDZONE_MODIFIED = 1; + WRONG_RESULT = 2; + } + + message FailureResult { + FailureKind kind = 1; + string msg = 2; + + // For failure_kind == WRONG_RESULT, this field indicates the reference + // configuration that we compared against. + // + // Note that the reference algorithm isn't always correct. However, + // empirically it's more correct, as it's "algo 0", less fancy than the + // compared one. + oneof key { + ConvKey reference_conv = 11; + GemmKey reference_gemm = 12; + } + + int64 buffer_address = 13; + } + + message ConvKey { + int64 algorithm = 1; + bool tensor_ops_enabled = 2; + } + + message GemmKey { + int64 algorithm = 1; + } + + int64 scratch_bytes = 8; + google.protobuf.Duration run_time = 9; + + FailureResult failure = 7; + + oneof key { + ConvKey conv = 5; + GemmKey gemm = 6; + } + + // Next ID: 14 +} + +message AutotuningLog { + google.protobuf.Any instr = 1; + + // Records all auto-tuning results per algorithm. + repeated AutotuneResult results = 2; + + CudnnVersion cudnn_version = 3; + ComputeCapability compute_capability = 4; + + // stream_executor::DeviceDescription::pci_bus_id. + string device_pci_bus_id = 5; + + string blas_version = 6; + + // Next ID: 7 +} diff --git a/executor/proto/tensorflow/core/protobuf/cluster.pb.go b/executor/proto/tensorflow/core/protobuf/cluster.pb.go new file mode 100644 index 0000000000..5726251013 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/cluster.pb.go @@ -0,0 +1,146 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow/core/protobuf/cluster.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Defines a single job in a TensorFlow cluster. +type JobDef struct { + // The name of this job. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapping from task ID to "hostname:port" string. + // + // If the `name` field contains "worker", and the `tasks` map contains a + // mapping from 7 to "example.org:2222", then the device prefix + // "/job:worker/task:7" will be assigned to "example.org:2222". 
+ Tasks map[int32]string `protobuf:"bytes,2,rep,name=tasks,proto3" json:"tasks,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *JobDef) Reset() { *m = JobDef{} } +func (m *JobDef) String() string { return proto.CompactTextString(m) } +func (*JobDef) ProtoMessage() {} +func (*JobDef) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea47a9615190cff, []int{0} +} + +func (m *JobDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_JobDef.Unmarshal(m, b) +} +func (m *JobDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_JobDef.Marshal(b, m, deterministic) +} +func (m *JobDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobDef.Merge(m, src) +} +func (m *JobDef) XXX_Size() int { + return xxx_messageInfo_JobDef.Size(m) +} +func (m *JobDef) XXX_DiscardUnknown() { + xxx_messageInfo_JobDef.DiscardUnknown(m) +} + +var xxx_messageInfo_JobDef proto.InternalMessageInfo + +func (m *JobDef) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *JobDef) GetTasks() map[int32]string { + if m != nil { + return m.Tasks + } + return nil +} + +// Defines a TensorFlow cluster as a set of jobs. +type ClusterDef struct { + // The jobs that comprise the cluster. 
+ Job []*JobDef `protobuf:"bytes,1,rep,name=job,proto3" json:"job,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterDef) Reset() { *m = ClusterDef{} } +func (m *ClusterDef) String() string { return proto.CompactTextString(m) } +func (*ClusterDef) ProtoMessage() {} +func (*ClusterDef) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea47a9615190cff, []int{1} +} + +func (m *ClusterDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterDef.Unmarshal(m, b) +} +func (m *ClusterDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterDef.Marshal(b, m, deterministic) +} +func (m *ClusterDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterDef.Merge(m, src) +} +func (m *ClusterDef) XXX_Size() int { + return xxx_messageInfo_ClusterDef.Size(m) +} +func (m *ClusterDef) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterDef.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterDef proto.InternalMessageInfo + +func (m *ClusterDef) GetJob() []*JobDef { + if m != nil { + return m.Job + } + return nil +} + +func init() { + proto.RegisterType((*JobDef)(nil), "tensorflow.JobDef") + proto.RegisterMapType((map[int32]string)(nil), "tensorflow.JobDef.TasksEntry") + proto.RegisterType((*ClusterDef)(nil), "tensorflow.ClusterDef") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/cluster.proto", fileDescriptor_8ea47a9615190cff) +} + +var fileDescriptor_8ea47a9615190cff = []byte{ + // 250 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x50, 0xc1, 0x4a, 0xc3, 0x40, + 0x10, 0x65, 0x13, 0x53, 0x70, 0x44, 0x90, 0xc5, 0x43, 0x28, 0x08, 0xa5, 0x88, 0xf4, 0xb4, 0x81, + 0xf6, 0x52, 0xc4, 0x53, 0xd5, 0x8b, 0xa7, 0x12, 0x3c, 0x79, 0xcb, 0xc6, 0x4d, 0x8c, 0x4d, 0x76, + 0x64, 0x77, 0x56, 0xe9, 0x37, 0xf8, 0xc3, 0x1e, 0x65, 0x77, 0x85, 0x54, 0xf4, 0xf6, 0x66, 0xde, + 
0xcc, 0xbc, 0x37, 0x0f, 0xae, 0x48, 0x69, 0x8b, 0xa6, 0xe9, 0xf1, 0xa3, 0xa8, 0xd1, 0xa8, 0xe2, + 0xcd, 0x20, 0xa1, 0x74, 0x4d, 0x51, 0xf7, 0xce, 0x92, 0x32, 0x22, 0x34, 0x38, 0x8c, 0x73, 0xf3, + 0x4f, 0x06, 0x93, 0x07, 0x94, 0x77, 0xaa, 0xe1, 0x1c, 0x8e, 0x74, 0x35, 0xa8, 0x9c, 0xcd, 0xd8, + 0xe2, 0xb8, 0x0c, 0x98, 0xaf, 0x20, 0xa3, 0xca, 0xee, 0x6c, 0x9e, 0xcc, 0xd2, 0xc5, 0xc9, 0xf2, + 0x42, 0x8c, 0xab, 0x22, 0xae, 0x89, 0x47, 0xcf, 0xdf, 0x6b, 0x32, 0xfb, 0x32, 0xce, 0x4e, 0xd7, + 0x00, 0x63, 0x93, 0x9f, 0x41, 0xba, 0x53, 0xfb, 0x70, 0x35, 0x2b, 0x3d, 0xe4, 0xe7, 0x90, 0xbd, + 0x57, 0xbd, 0x53, 0x79, 0x12, 0x94, 0x62, 0x71, 0x9d, 0xac, 0xd9, 0x7c, 0x09, 0x70, 0x1b, 0xad, + 0x7a, 0x43, 0x97, 0x90, 0xbe, 0xa2, 0xcc, 0x59, 0x90, 0xe6, 0x7f, 0xa5, 0x4b, 0x4f, 0x6f, 0x34, + 0x4c, 0xd1, 0xb4, 0x87, 0xec, 0x73, 0x67, 0xc9, 0x38, 0x4d, 0xdd, 0xa0, 0x36, 0xa7, 0x3f, 0xf7, + 0xb6, 0xfe, 0x73, 0xbb, 0x65, 0x4f, 0x37, 0x6d, 0x47, 0x2f, 0x4e, 0x8a, 0x1a, 0x87, 0xe2, 0x20, + 0xaf, 0xff, 0x61, 0x8b, 0xbf, 0x83, 0xfc, 0x62, 0x4c, 0x4e, 0x42, 0xb1, 0xfa, 0x0e, 0x00, 0x00, + 0xff, 0xff, 0xb9, 0x8e, 0x3e, 0x8f, 0x6e, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/cluster.proto b/executor/proto/tensorflow/core/protobuf/cluster.proto new file mode 100644 index 0000000000..c696d345e0 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/cluster.proto @@ -0,0 +1,83 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "ClusterProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.distruntime"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; + +// This file contains protos to be used when defining a TensorFlow +// cluster. +// +// EXAMPLES +// -------- +// +// 1. A single-process cluster, containing "/job:local/task:0". +// +// Cluster: +// job { name: 'local' tasks { key: 0 value: 'localhost:2222' } } +// +// Server: +// cluster { $CLUSTER } job_name: 'local' task_index: 0 +// +// 2. A two-process cluster, containing "/job:local/task:{0,1}". +// +// Cluster: +// job { name: 'local' tasks { key: 0 value: 'localhost:2222' } +// tasks { key: 1 value: 'localhost:2223' } } +// +// Servers: +// cluster { $CLUSTER } job_name: 'local' task_index: 0 +// cluster { $CLUSTER } job_name: 'local' task_index: 1 +// +// 3. A two-job cluster, containing "/job:worker/task:{0,1,2}" and +// "/job:ps/task:{0,1}". +// +// Cluster: +// job { name: 'worker' tasks { key: 0 value: 'worker1:2222' } +// tasks { key: 1 value: 'worker2:2222' } +// tasks { key: 2 value: 'worker3:2222' } } +// job { name: 'ps' tasks { key: 0 value: 'ps0:2222' } +// tasks { key: 1 value: 'ps1:2222' } } +// +// Servers: +// cluster { $CLUSTER } job_name: 'worker' task_index: 0 +// cluster { $CLUSTER } job_name: 'worker' task_index: 1 +// cluster { $CLUSTER } job_name: 'worker' task_index: 2 +// cluster { $CLUSTER } job_name: 'ps' task_index: 0 +// cluster { $CLUSTER } job_name: 'ps' task_index: 1 + +// Defines a single job in a TensorFlow cluster. +message JobDef { + // The name of this job. + string name = 1; + + // Mapping from task ID to "hostname:port" string. 
+ // + // If the `name` field contains "worker", and the `tasks` map contains a + // mapping from 7 to "example.org:2222", then the device prefix + // "/job:worker/task:7" will be assigned to "example.org:2222". + map tasks = 2; +} + +// Defines a TensorFlow cluster as a set of jobs. +message ClusterDef { + // The jobs that comprise the cluster. + repeated JobDef job = 1; +} diff --git a/executor/proto/tensorflow/core/protobuf/config.pb.go b/executor/proto/tensorflow/core/protobuf/config.pb.go new file mode 100644 index 0000000000..5bc4ff4384 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/config.pb.go @@ -0,0 +1,2043 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/config.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Optimization level +type OptimizerOptions_Level int32 + +const ( + // L1 is the default level. + // Optimization performed at L1 : + // 1. Common subexpression elimination + // 2. 
Constant folding + OptimizerOptions_L1 OptimizerOptions_Level = 0 + // No optimizations + OptimizerOptions_L0 OptimizerOptions_Level = -1 +) + +var OptimizerOptions_Level_name = map[int32]string{ + 0: "L1", + -1: "L0", +} + +var OptimizerOptions_Level_value = map[string]int32{ + "L1": 0, + "L0": -1, +} + +func (x OptimizerOptions_Level) String() string { + return proto.EnumName(OptimizerOptions_Level_name, int32(x)) +} + +func (OptimizerOptions_Level) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{1, 0} +} + +// Control the use of the compiler/jit. Experimental. +type OptimizerOptions_GlobalJitLevel int32 + +const ( + OptimizerOptions_DEFAULT OptimizerOptions_GlobalJitLevel = 0 + OptimizerOptions_OFF OptimizerOptions_GlobalJitLevel = -1 + // The following settings turn on compilation, with higher values being + // more aggressive. Higher values may reduce opportunities for parallelism + // and may use more memory. (At present, there is no distinction, but this + // is expected to change.) + OptimizerOptions_ON_1 OptimizerOptions_GlobalJitLevel = 1 + OptimizerOptions_ON_2 OptimizerOptions_GlobalJitLevel = 2 +) + +var OptimizerOptions_GlobalJitLevel_name = map[int32]string{ + 0: "DEFAULT", + -1: "OFF", + 1: "ON_1", + 2: "ON_2", +} + +var OptimizerOptions_GlobalJitLevel_value = map[string]int32{ + "DEFAULT": 0, + "OFF": -1, + "ON_1": 1, + "ON_2": 2, +} + +func (x OptimizerOptions_GlobalJitLevel) String() string { + return proto.EnumName(OptimizerOptions_GlobalJitLevel_name, int32(x)) +} + +func (OptimizerOptions_GlobalJitLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{1, 1} +} + +// TODO(pbar) Turn this into a TraceOptions proto which allows +// tracing to be controlled in a more orthogonal manner? 
+type RunOptions_TraceLevel int32 + +const ( + RunOptions_NO_TRACE RunOptions_TraceLevel = 0 + RunOptions_SOFTWARE_TRACE RunOptions_TraceLevel = 1 + RunOptions_HARDWARE_TRACE RunOptions_TraceLevel = 2 + RunOptions_FULL_TRACE RunOptions_TraceLevel = 3 +) + +var RunOptions_TraceLevel_name = map[int32]string{ + 0: "NO_TRACE", + 1: "SOFTWARE_TRACE", + 2: "HARDWARE_TRACE", + 3: "FULL_TRACE", +} + +var RunOptions_TraceLevel_value = map[string]int32{ + "NO_TRACE": 0, + "SOFTWARE_TRACE": 1, + "HARDWARE_TRACE": 2, + "FULL_TRACE": 3, +} + +func (x RunOptions_TraceLevel) String() string { + return proto.EnumName(RunOptions_TraceLevel_name, int32(x)) +} + +func (RunOptions_TraceLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{7, 0} +} + +type GPUOptions struct { + // Fraction of the available GPU memory to allocate for each process. + // 1 means to allocate all of the GPU memory, 0.5 means the process + // allocates up to ~50% of the available GPU memory. + // + // GPU memory is pre-allocated unless the allow_growth option is enabled. + // + // If greater than 1.0, uses CUDA unified memory to potentially oversubscribe + // the amount of memory available on the GPU device by using host memory as a + // swap space. Accessing memory not available on the device will be + // significantly slower as that would require memory transfer between the host + // and the device. Options to reduce the memory requirement should be + // considered before enabling this option as this may come with a negative + // performance impact. Oversubscription using the unified memory requires + // Pascal class or newer GPUs and it is currently only supported on the Linux + // operating system. See + // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements + // for the detailed requirements. 
+ PerProcessGpuMemoryFraction float64 `protobuf:"fixed64,1,opt,name=per_process_gpu_memory_fraction,json=perProcessGpuMemoryFraction,proto3" json:"per_process_gpu_memory_fraction,omitempty"` + // If true, the allocator does not pre-allocate the entire specified + // GPU memory region, instead starting small and growing as needed. + AllowGrowth bool `protobuf:"varint,4,opt,name=allow_growth,json=allowGrowth,proto3" json:"allow_growth,omitempty"` + // The type of GPU allocation strategy to use. + // + // Allowed values: + // "": The empty string (default) uses a system-chosen default + // which may change over time. + // + // "BFC": A "Best-fit with coalescing" algorithm, simplified from a + // version of dlmalloc. + AllocatorType string `protobuf:"bytes,2,opt,name=allocator_type,json=allocatorType,proto3" json:"allocator_type,omitempty"` + // Delay deletion of up to this many bytes to reduce the number of + // interactions with gpu driver code. If 0, the system chooses + // a reasonable default (several MBs). + DeferredDeletionBytes int64 `protobuf:"varint,3,opt,name=deferred_deletion_bytes,json=deferredDeletionBytes,proto3" json:"deferred_deletion_bytes,omitempty"` + // A comma-separated list of GPU ids that determines the 'visible' + // to 'virtual' mapping of GPU devices. For example, if TensorFlow + // can see 8 GPU devices in the process, and one wanted to map + // visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1", + // then one would specify this field as "5,3". This field is similar in + // spirit to the CUDA_VISIBLE_DEVICES environment variable, except + // it applies to the visible GPU devices in the process. + // + // NOTE: + // 1. The GPU driver provides the process with the visible GPUs + // in an order which is not guaranteed to have any correlation to + // the *physical* GPU id in the machine. This field is used for + // remapping "visible" to "virtual", which means this operates only + // after the process starts. 
Users are required to use vendor + // specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the + // physical to visible device mapping prior to invoking TensorFlow. + // 2. In the code, the ids in this list are also called "platform GPU id"s, + // and the 'virtual' ids of GPU devices (i.e. the ids in the device + // name "/device:GPU:") are also called "TF GPU id"s. Please + // refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h + // for more information. + VisibleDeviceList string `protobuf:"bytes,5,opt,name=visible_device_list,json=visibleDeviceList,proto3" json:"visible_device_list,omitempty"` + // In the event polling loop sleep this many microseconds between + // PollEvents calls, when the queue is not empty. If value is not + // set or set to 0, gets set to a non-zero default. + PollingActiveDelayUsecs int32 `protobuf:"varint,6,opt,name=polling_active_delay_usecs,json=pollingActiveDelayUsecs,proto3" json:"polling_active_delay_usecs,omitempty"` + // This field is deprecated and ignored. + PollingInactiveDelayMsecs int32 `protobuf:"varint,7,opt,name=polling_inactive_delay_msecs,json=pollingInactiveDelayMsecs,proto3" json:"polling_inactive_delay_msecs,omitempty"` + // Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow, + // enabling this option forces all CPU tensors to be allocated with Cuda + // pinned memory. Normally, TensorFlow will infer which tensors should be + // allocated as the pinned memory. But in case where the inference is + // incomplete, this option can significantly speed up the cross-device memory + // copy performance as long as it fits the memory. + // Note that this option is not something that should be + // enabled by default for unknown or very large models, since all Cuda pinned + // memory is unpageable, having too much pinned memory might negatively impact + // the overall host system performance. 
+ ForceGpuCompatible bool `protobuf:"varint,8,opt,name=force_gpu_compatible,json=forceGpuCompatible,proto3" json:"force_gpu_compatible,omitempty"` + // Everything inside experimental is subject to change and is not subject + // to API stability guarantees in + // https://www.tensorflow.org/guide/version_compat. + Experimental *GPUOptions_Experimental `protobuf:"bytes,9,opt,name=experimental,proto3" json:"experimental,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GPUOptions) Reset() { *m = GPUOptions{} } +func (m *GPUOptions) String() string { return proto.CompactTextString(m) } +func (*GPUOptions) ProtoMessage() {} +func (*GPUOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{0} +} + +func (m *GPUOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GPUOptions.Unmarshal(m, b) +} +func (m *GPUOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GPUOptions.Marshal(b, m, deterministic) +} +func (m *GPUOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_GPUOptions.Merge(m, src) +} +func (m *GPUOptions) XXX_Size() int { + return xxx_messageInfo_GPUOptions.Size(m) +} +func (m *GPUOptions) XXX_DiscardUnknown() { + xxx_messageInfo_GPUOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_GPUOptions proto.InternalMessageInfo + +func (m *GPUOptions) GetPerProcessGpuMemoryFraction() float64 { + if m != nil { + return m.PerProcessGpuMemoryFraction + } + return 0 +} + +func (m *GPUOptions) GetAllowGrowth() bool { + if m != nil { + return m.AllowGrowth + } + return false +} + +func (m *GPUOptions) GetAllocatorType() string { + if m != nil { + return m.AllocatorType + } + return "" +} + +func (m *GPUOptions) GetDeferredDeletionBytes() int64 { + if m != nil { + return m.DeferredDeletionBytes + } + return 0 +} + +func (m *GPUOptions) GetVisibleDeviceList() string { + if m != nil { + return 
m.VisibleDeviceList + } + return "" +} + +func (m *GPUOptions) GetPollingActiveDelayUsecs() int32 { + if m != nil { + return m.PollingActiveDelayUsecs + } + return 0 +} + +func (m *GPUOptions) GetPollingInactiveDelayMsecs() int32 { + if m != nil { + return m.PollingInactiveDelayMsecs + } + return 0 +} + +func (m *GPUOptions) GetForceGpuCompatible() bool { + if m != nil { + return m.ForceGpuCompatible + } + return false +} + +func (m *GPUOptions) GetExperimental() *GPUOptions_Experimental { + if m != nil { + return m.Experimental + } + return nil +} + +type GPUOptions_Experimental struct { + // The multi virtual device settings. If empty (not set), it will create + // single virtual device on each visible GPU, according to the settings + // in "visible_device_list" above. Otherwise, the number of elements in the + // list must be the same as the number of visible GPUs (after + // "visible_device_list" filtering if it is set), and the string represented + // device names (e.g. /device:GPU:) will refer to the virtual + // devices and have the field assigned sequentially starting from 0, + // according to the order they appear in this list and the "memory_limit" + // list inside each element. For example, + // visible_device_list = "1,0" + // virtual_devices { memory_limit: 1GB memory_limit: 2GB } + // virtual_devices {} + // will create three virtual devices as: + // /device:GPU:0 -> visible GPU 1 with 1GB memory + // /device:GPU:1 -> visible GPU 1 with 2GB memory + // /device:GPU:2 -> visible GPU 0 with all available memory + // + // NOTE: + // 1. It's invalid to set both this and "per_process_gpu_memory_fraction" + // at the same time. + // 2. Currently this setting is per-process, not per-session. Using + // different settings in different sessions within same process will + // result in undefined behavior. 
+ VirtualDevices []*GPUOptions_Experimental_VirtualDevices `protobuf:"bytes,1,rep,name=virtual_devices,json=virtualDevices,proto3" json:"virtual_devices,omitempty"` + // If true, uses CUDA unified memory for memory allocations. If + // per_process_gpu_memory_fraction option is greater than 1.0, then unified + // memory is used regardless of the value for this field. See comments for + // per_process_gpu_memory_fraction field for more details and requirements + // of the unified memory. This option is useful to oversubscribe memory if + // multiple processes are sharing a single GPU while individually using less + // than 1.0 per process memory fraction. + UseUnifiedMemory bool `protobuf:"varint,2,opt,name=use_unified_memory,json=useUnifiedMemory,proto3" json:"use_unified_memory,omitempty"` + // If > 1, the number of device-to-device copy streams to create + // for each GPUDevice. Default value is 0, which is automatically + // converted to 1. + NumDevToDevCopyStreams int32 `protobuf:"varint,3,opt,name=num_dev_to_dev_copy_streams,json=numDevToDevCopyStreams,proto3" json:"num_dev_to_dev_copy_streams,omitempty"` + // If non-empty, defines a good GPU ring order on a single worker based on + // device interconnect. This assumes that all workers have the same GPU + // topology. Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4". + // This ring order is used by the RingReducer implementation of + // CollectiveReduce, and serves as an override to automatic ring order + // generation in OrderTaskDeviceMap() during CollectiveParam resolution. + CollectiveRingOrder string `protobuf:"bytes,4,opt,name=collective_ring_order,json=collectiveRingOrder,proto3" json:"collective_ring_order,omitempty"` + // If true then extra work is done by GPUDevice and GPUBFCAllocator to + // keep track of when GPU memory is freed and when kernels actually + // complete so that we can know when a nominally free memory chunk + // is really not subject to pending use. 
+ TimestampedAllocator bool `protobuf:"varint,5,opt,name=timestamped_allocator,json=timestampedAllocator,proto3" json:"timestamped_allocator,omitempty"` + // Parameters for GPUKernelTracker. By default no kernel tracking is done. + // Note that timestamped_allocator is only effective if some tracking is + // specified. + // + // If kernel_tracker_max_interval = n > 0, then a tracking event + // is inserted after every n kernels without an event. + KernelTrackerMaxInterval int32 `protobuf:"varint,7,opt,name=kernel_tracker_max_interval,json=kernelTrackerMaxInterval,proto3" json:"kernel_tracker_max_interval,omitempty"` + // If kernel_tracker_max_bytes = n > 0, then a tracking event is + // inserted after every series of kernels allocating a sum of + // memory >= n. If one kernel allocates b * n bytes, then one + // event will be inserted after it, but it will count as b against + // the pending limit. + KernelTrackerMaxBytes int32 `protobuf:"varint,8,opt,name=kernel_tracker_max_bytes,json=kernelTrackerMaxBytes,proto3" json:"kernel_tracker_max_bytes,omitempty"` + // If kernel_tracker_max_pending > 0 then no more than this many + // tracking events can be outstanding at a time. An attempt to + // launch an additional kernel will stall until an event + // completes. 
+ KernelTrackerMaxPending int32 `protobuf:"varint,9,opt,name=kernel_tracker_max_pending,json=kernelTrackerMaxPending,proto3" json:"kernel_tracker_max_pending,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GPUOptions_Experimental) Reset() { *m = GPUOptions_Experimental{} } +func (m *GPUOptions_Experimental) String() string { return proto.CompactTextString(m) } +func (*GPUOptions_Experimental) ProtoMessage() {} +func (*GPUOptions_Experimental) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{0, 0} +} + +func (m *GPUOptions_Experimental) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GPUOptions_Experimental.Unmarshal(m, b) +} +func (m *GPUOptions_Experimental) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GPUOptions_Experimental.Marshal(b, m, deterministic) +} +func (m *GPUOptions_Experimental) XXX_Merge(src proto.Message) { + xxx_messageInfo_GPUOptions_Experimental.Merge(m, src) +} +func (m *GPUOptions_Experimental) XXX_Size() int { + return xxx_messageInfo_GPUOptions_Experimental.Size(m) +} +func (m *GPUOptions_Experimental) XXX_DiscardUnknown() { + xxx_messageInfo_GPUOptions_Experimental.DiscardUnknown(m) +} + +var xxx_messageInfo_GPUOptions_Experimental proto.InternalMessageInfo + +func (m *GPUOptions_Experimental) GetVirtualDevices() []*GPUOptions_Experimental_VirtualDevices { + if m != nil { + return m.VirtualDevices + } + return nil +} + +func (m *GPUOptions_Experimental) GetUseUnifiedMemory() bool { + if m != nil { + return m.UseUnifiedMemory + } + return false +} + +func (m *GPUOptions_Experimental) GetNumDevToDevCopyStreams() int32 { + if m != nil { + return m.NumDevToDevCopyStreams + } + return 0 +} + +func (m *GPUOptions_Experimental) GetCollectiveRingOrder() string { + if m != nil { + return m.CollectiveRingOrder + } + return "" +} + +func (m *GPUOptions_Experimental) 
GetTimestampedAllocator() bool { + if m != nil { + return m.TimestampedAllocator + } + return false +} + +func (m *GPUOptions_Experimental) GetKernelTrackerMaxInterval() int32 { + if m != nil { + return m.KernelTrackerMaxInterval + } + return 0 +} + +func (m *GPUOptions_Experimental) GetKernelTrackerMaxBytes() int32 { + if m != nil { + return m.KernelTrackerMaxBytes + } + return 0 +} + +func (m *GPUOptions_Experimental) GetKernelTrackerMaxPending() int32 { + if m != nil { + return m.KernelTrackerMaxPending + } + return 0 +} + +// Configuration for breaking down a visible GPU into multiple "virtual" +// devices. +type GPUOptions_Experimental_VirtualDevices struct { + // Per "virtual" device memory limit, in MB. The number of elements in + // the list is the number of virtual devices to create on the + // corresponding visible GPU (see "virtual_devices" below). + // If empty, it will create single virtual device taking all available + // memory from the device. + // + // For the concept of "visible" and "virtual" GPU, see the comments for + // "visible_device_list" above for more information. 
+ MemoryLimitMb []float32 `protobuf:"fixed32,1,rep,packed,name=memory_limit_mb,json=memoryLimitMb,proto3" json:"memory_limit_mb,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GPUOptions_Experimental_VirtualDevices) Reset() { + *m = GPUOptions_Experimental_VirtualDevices{} +} +func (m *GPUOptions_Experimental_VirtualDevices) String() string { return proto.CompactTextString(m) } +func (*GPUOptions_Experimental_VirtualDevices) ProtoMessage() {} +func (*GPUOptions_Experimental_VirtualDevices) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{0, 0, 0} +} + +func (m *GPUOptions_Experimental_VirtualDevices) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GPUOptions_Experimental_VirtualDevices.Unmarshal(m, b) +} +func (m *GPUOptions_Experimental_VirtualDevices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GPUOptions_Experimental_VirtualDevices.Marshal(b, m, deterministic) +} +func (m *GPUOptions_Experimental_VirtualDevices) XXX_Merge(src proto.Message) { + xxx_messageInfo_GPUOptions_Experimental_VirtualDevices.Merge(m, src) +} +func (m *GPUOptions_Experimental_VirtualDevices) XXX_Size() int { + return xxx_messageInfo_GPUOptions_Experimental_VirtualDevices.Size(m) +} +func (m *GPUOptions_Experimental_VirtualDevices) XXX_DiscardUnknown() { + xxx_messageInfo_GPUOptions_Experimental_VirtualDevices.DiscardUnknown(m) +} + +var xxx_messageInfo_GPUOptions_Experimental_VirtualDevices proto.InternalMessageInfo + +func (m *GPUOptions_Experimental_VirtualDevices) GetMemoryLimitMb() []float32 { + if m != nil { + return m.MemoryLimitMb + } + return nil +} + +// Options passed to the graph optimizer +type OptimizerOptions struct { + // If true, optimize the graph using common subexpression elimination. 
+ DoCommonSubexpressionElimination bool `protobuf:"varint,1,opt,name=do_common_subexpression_elimination,json=doCommonSubexpressionElimination,proto3" json:"do_common_subexpression_elimination,omitempty"` + // If true, perform constant folding optimization on the graph. + DoConstantFolding bool `protobuf:"varint,2,opt,name=do_constant_folding,json=doConstantFolding,proto3" json:"do_constant_folding,omitempty"` + // Constant folding optimization replaces tensors whose values can be + // predetermined, with constant nodes. To avoid inserting too large constants, + // the size of each constant created can be limited. If this value is zero, a + // default limit of 10 MiB will be applied. If constant folding optimization + // is disabled, this value is ignored. + MaxFoldedConstantInBytes int64 `protobuf:"varint,6,opt,name=max_folded_constant_in_bytes,json=maxFoldedConstantInBytes,proto3" json:"max_folded_constant_in_bytes,omitempty"` + // If true, perform function inlining on the graph. + DoFunctionInlining bool `protobuf:"varint,4,opt,name=do_function_inlining,json=doFunctionInlining,proto3" json:"do_function_inlining,omitempty"` + // Overall optimization level. The actual optimizations applied will be the + // logical OR of the flags that this level implies and any flags already set. 
+ OptLevel OptimizerOptions_Level `protobuf:"varint,3,opt,name=opt_level,json=optLevel,proto3,enum=tensorflow.OptimizerOptions_Level" json:"opt_level,omitempty"` + GlobalJitLevel OptimizerOptions_GlobalJitLevel `protobuf:"varint,5,opt,name=global_jit_level,json=globalJitLevel,proto3,enum=tensorflow.OptimizerOptions_GlobalJitLevel" json:"global_jit_level,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OptimizerOptions) Reset() { *m = OptimizerOptions{} } +func (m *OptimizerOptions) String() string { return proto.CompactTextString(m) } +func (*OptimizerOptions) ProtoMessage() {} +func (*OptimizerOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{1} +} + +func (m *OptimizerOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OptimizerOptions.Unmarshal(m, b) +} +func (m *OptimizerOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OptimizerOptions.Marshal(b, m, deterministic) +} +func (m *OptimizerOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptimizerOptions.Merge(m, src) +} +func (m *OptimizerOptions) XXX_Size() int { + return xxx_messageInfo_OptimizerOptions.Size(m) +} +func (m *OptimizerOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OptimizerOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OptimizerOptions proto.InternalMessageInfo + +func (m *OptimizerOptions) GetDoCommonSubexpressionElimination() bool { + if m != nil { + return m.DoCommonSubexpressionElimination + } + return false +} + +func (m *OptimizerOptions) GetDoConstantFolding() bool { + if m != nil { + return m.DoConstantFolding + } + return false +} + +func (m *OptimizerOptions) GetMaxFoldedConstantInBytes() int64 { + if m != nil { + return m.MaxFoldedConstantInBytes + } + return 0 +} + +func (m *OptimizerOptions) GetDoFunctionInlining() bool { + if m != nil { + return m.DoFunctionInlining + } + return 
false +} + +func (m *OptimizerOptions) GetOptLevel() OptimizerOptions_Level { + if m != nil { + return m.OptLevel + } + return OptimizerOptions_L1 +} + +func (m *OptimizerOptions) GetGlobalJitLevel() OptimizerOptions_GlobalJitLevel { + if m != nil { + return m.GlobalJitLevel + } + return OptimizerOptions_DEFAULT +} + +type GraphOptions struct { + // If true, use control flow to schedule the activation of Recv nodes. + // (Currently ignored.) + EnableRecvScheduling bool `protobuf:"varint,2,opt,name=enable_recv_scheduling,json=enableRecvScheduling,proto3" json:"enable_recv_scheduling,omitempty"` + // Options controlling how graph is optimized. + OptimizerOptions *OptimizerOptions `protobuf:"bytes,3,opt,name=optimizer_options,json=optimizerOptions,proto3" json:"optimizer_options,omitempty"` + // The number of steps to run before returning a cost model detailing + // the memory usage and performance of each node of the graph. 0 means + // no cost model. + BuildCostModel int64 `protobuf:"varint,4,opt,name=build_cost_model,json=buildCostModel,proto3" json:"build_cost_model,omitempty"` + // The number of steps to skip before collecting statistics for the + // cost model. + BuildCostModelAfter int64 `protobuf:"varint,9,opt,name=build_cost_model_after,json=buildCostModelAfter,proto3" json:"build_cost_model_after,omitempty"` + // Annotate each Node with Op output shape data, to the extent it can + // be statically inferred. + InferShapes bool `protobuf:"varint,5,opt,name=infer_shapes,json=inferShapes,proto3" json:"infer_shapes,omitempty"` + // Only place the subgraphs that are run, rather than the entire graph. + // + // This is useful for interactive graph building, where one might + // produce graphs that cannot be placed during the debugging + // process. In particular, it allows the client to continue work in + // a session after adding a node to a graph whose placement + // constraints are unsatisfiable. 
+ PlacePrunedGraph bool `protobuf:"varint,6,opt,name=place_pruned_graph,json=placePrunedGraph,proto3" json:"place_pruned_graph,omitempty"` + // If true, transfer float values between processes as bfloat16. + EnableBfloat16Sendrecv bool `protobuf:"varint,7,opt,name=enable_bfloat16_sendrecv,json=enableBfloat16Sendrecv,proto3" json:"enable_bfloat16_sendrecv,omitempty"` + // If > 0, record a timeline every this many steps. + // EXPERIMENTAL: This currently has no effect in MasterSession. + TimelineStep int32 `protobuf:"varint,8,opt,name=timeline_step,json=timelineStep,proto3" json:"timeline_step,omitempty"` + // Options that control the type and amount of graph rewriting. + // Not currently configurable via the public Python API (i.e. there is no API + // stability guarantee if you import RewriterConfig explicitly). + RewriteOptions *RewriterConfig `protobuf:"bytes,10,opt,name=rewrite_options,json=rewriteOptions,proto3" json:"rewrite_options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphOptions) Reset() { *m = GraphOptions{} } +func (m *GraphOptions) String() string { return proto.CompactTextString(m) } +func (*GraphOptions) ProtoMessage() {} +func (*GraphOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{2} +} + +func (m *GraphOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphOptions.Unmarshal(m, b) +} +func (m *GraphOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphOptions.Marshal(b, m, deterministic) +} +func (m *GraphOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphOptions.Merge(m, src) +} +func (m *GraphOptions) XXX_Size() int { + return xxx_messageInfo_GraphOptions.Size(m) +} +func (m *GraphOptions) XXX_DiscardUnknown() { + xxx_messageInfo_GraphOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphOptions proto.InternalMessageInfo + +func (m 
*GraphOptions) GetEnableRecvScheduling() bool { + if m != nil { + return m.EnableRecvScheduling + } + return false +} + +func (m *GraphOptions) GetOptimizerOptions() *OptimizerOptions { + if m != nil { + return m.OptimizerOptions + } + return nil +} + +func (m *GraphOptions) GetBuildCostModel() int64 { + if m != nil { + return m.BuildCostModel + } + return 0 +} + +func (m *GraphOptions) GetBuildCostModelAfter() int64 { + if m != nil { + return m.BuildCostModelAfter + } + return 0 +} + +func (m *GraphOptions) GetInferShapes() bool { + if m != nil { + return m.InferShapes + } + return false +} + +func (m *GraphOptions) GetPlacePrunedGraph() bool { + if m != nil { + return m.PlacePrunedGraph + } + return false +} + +func (m *GraphOptions) GetEnableBfloat16Sendrecv() bool { + if m != nil { + return m.EnableBfloat16Sendrecv + } + return false +} + +func (m *GraphOptions) GetTimelineStep() int32 { + if m != nil { + return m.TimelineStep + } + return 0 +} + +func (m *GraphOptions) GetRewriteOptions() *RewriterConfig { + if m != nil { + return m.RewriteOptions + } + return nil +} + +type ThreadPoolOptionProto struct { + // The number of threads in the pool. + // + // 0 means the system picks a value based on where this option proto is used + // (see the declaration of the specific field for more info). + NumThreads int32 `protobuf:"varint,1,opt,name=num_threads,json=numThreads,proto3" json:"num_threads,omitempty"` + // The global name of the threadpool. + // + // If empty, then the threadpool is made and used according to the scope it's + // in - e.g., for a session threadpool, it is used by that session only. + // + // If non-empty, then: + // - a global threadpool associated with this name is looked + // up or created. This allows, for example, sharing one threadpool across + // many sessions (e.g., like the default behavior, if + // inter_op_parallelism_threads is not configured), but still partitioning + // into a large and small pool. 
+ // - if the threadpool for this global_name already exists, then it is an + // error if the existing pool was created using a different num_threads + // value as is specified on this call. + // - threadpools created this way are never garbage collected. + GlobalName string `protobuf:"bytes,2,opt,name=global_name,json=globalName,proto3" json:"global_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ThreadPoolOptionProto) Reset() { *m = ThreadPoolOptionProto{} } +func (m *ThreadPoolOptionProto) String() string { return proto.CompactTextString(m) } +func (*ThreadPoolOptionProto) ProtoMessage() {} +func (*ThreadPoolOptionProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{3} +} + +func (m *ThreadPoolOptionProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ThreadPoolOptionProto.Unmarshal(m, b) +} +func (m *ThreadPoolOptionProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ThreadPoolOptionProto.Marshal(b, m, deterministic) +} +func (m *ThreadPoolOptionProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ThreadPoolOptionProto.Merge(m, src) +} +func (m *ThreadPoolOptionProto) XXX_Size() int { + return xxx_messageInfo_ThreadPoolOptionProto.Size(m) +} +func (m *ThreadPoolOptionProto) XXX_DiscardUnknown() { + xxx_messageInfo_ThreadPoolOptionProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ThreadPoolOptionProto proto.InternalMessageInfo + +func (m *ThreadPoolOptionProto) GetNumThreads() int32 { + if m != nil { + return m.NumThreads + } + return 0 +} + +func (m *ThreadPoolOptionProto) GetGlobalName() string { + if m != nil { + return m.GlobalName + } + return "" +} + +type RPCOptions struct { + // If true, always use RPC to contact the session target. 
+ // + // If false (the default option), TensorFlow may use an optimized + // transport for client-master communication that avoids the RPC + // stack. This option is primarily for used testing the RPC stack. + UseRpcForInprocessMaster bool `protobuf:"varint,1,opt,name=use_rpc_for_inprocess_master,json=useRpcForInprocessMaster,proto3" json:"use_rpc_for_inprocess_master,omitempty"` + // The compression algorithm to be used. One of "deflate", "gzip". + CompressionAlgorithm string `protobuf:"bytes,2,opt,name=compression_algorithm,json=compressionAlgorithm,proto3" json:"compression_algorithm,omitempty"` + // If compression_algorithm is set, the compression level to be used. + // From 0 (no compression), up to 3. + CompressionLevel int32 `protobuf:"varint,3,opt,name=compression_level,json=compressionLevel,proto3" json:"compression_level,omitempty"` + // Setting cache_rpc_response to true will enable sender side caching of + // response for RecvTensorAsync and RecvBufAsync to allow receiver to retry + // requests . This is only necessary when the network fabric is experiencing a + // significant error rate. Without it we'll fail a step on an network error, + // while with it we'll be able to complete long steps (like complex + // initializations) in the face of some network errors during RecvTensor. + CacheRpcResponse bool `protobuf:"varint,4,opt,name=cache_rpc_response,json=cacheRpcResponse,proto3" json:"cache_rpc_response,omitempty"` + // Disables TCP connection sharing when opening a new RPC channel. 
+ DisableSessionConnectionSharing bool `protobuf:"varint,5,opt,name=disable_session_connection_sharing,json=disableSessionConnectionSharing,proto3" json:"disable_session_connection_sharing,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RPCOptions) Reset() { *m = RPCOptions{} } +func (m *RPCOptions) String() string { return proto.CompactTextString(m) } +func (*RPCOptions) ProtoMessage() {} +func (*RPCOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{4} +} + +func (m *RPCOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RPCOptions.Unmarshal(m, b) +} +func (m *RPCOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RPCOptions.Marshal(b, m, deterministic) +} +func (m *RPCOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RPCOptions.Merge(m, src) +} +func (m *RPCOptions) XXX_Size() int { + return xxx_messageInfo_RPCOptions.Size(m) +} +func (m *RPCOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RPCOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RPCOptions proto.InternalMessageInfo + +func (m *RPCOptions) GetUseRpcForInprocessMaster() bool { + if m != nil { + return m.UseRpcForInprocessMaster + } + return false +} + +func (m *RPCOptions) GetCompressionAlgorithm() string { + if m != nil { + return m.CompressionAlgorithm + } + return "" +} + +func (m *RPCOptions) GetCompressionLevel() int32 { + if m != nil { + return m.CompressionLevel + } + return 0 +} + +func (m *RPCOptions) GetCacheRpcResponse() bool { + if m != nil { + return m.CacheRpcResponse + } + return false +} + +func (m *RPCOptions) GetDisableSessionConnectionSharing() bool { + if m != nil { + return m.DisableSessionConnectionSharing + } + return false +} + +// Metadata about the session. +// +// This can be used by the runtime and the Ops for debugging, monitoring, etc. 
+// +// The (name, version) tuple is expected to be a unique identifier for +// sessions within the same process. +// +// NOTE: This is currently used and propagated only by the direct session. +type SessionMetadata struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The version is optional. If set, needs to be >= 0. + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionMetadata) Reset() { *m = SessionMetadata{} } +func (m *SessionMetadata) String() string { return proto.CompactTextString(m) } +func (*SessionMetadata) ProtoMessage() {} +func (*SessionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{5} +} + +func (m *SessionMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SessionMetadata.Unmarshal(m, b) +} +func (m *SessionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SessionMetadata.Marshal(b, m, deterministic) +} +func (m *SessionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionMetadata.Merge(m, src) +} +func (m *SessionMetadata) XXX_Size() int { + return xxx_messageInfo_SessionMetadata.Size(m) +} +func (m *SessionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_SessionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionMetadata proto.InternalMessageInfo + +func (m *SessionMetadata) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SessionMetadata) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +// Session configuration parameters. +// The system picks appropriate values for fields that are not set. +type ConfigProto struct { + // Map from device type name (e.g., "CPU" or "GPU" ) to maximum + // number of devices of that type to use. 
If a particular device + // type is not found in the map, the system picks an appropriate + // number. + DeviceCount map[string]int32 `protobuf:"bytes,1,rep,name=device_count,json=deviceCount,proto3" json:"device_count,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + // The execution of an individual op (for some op types) can be + // parallelized on a pool of intra_op_parallelism_threads. + // 0 means the system picks an appropriate number. + IntraOpParallelismThreads int32 `protobuf:"varint,2,opt,name=intra_op_parallelism_threads,json=intraOpParallelismThreads,proto3" json:"intra_op_parallelism_threads,omitempty"` + // Nodes that perform blocking operations are enqueued on a pool of + // inter_op_parallelism_threads available in each process. + // + // 0 means the system picks an appropriate number. + // Negative means all operations are performed in caller's thread. + // + // Note that the first Session created in the process sets the + // number of threads for all future sessions unless use_per_session_threads is + // true or session_inter_op_thread_pool is configured. + InterOpParallelismThreads int32 `protobuf:"varint,5,opt,name=inter_op_parallelism_threads,json=interOpParallelismThreads,proto3" json:"inter_op_parallelism_threads,omitempty"` + // If true, use a new set of threads for this session rather than the global + // pool of threads. Only supported by direct sessions. + // + // If false, use the global threads created by the first session, or the + // per-session thread pools configured by session_inter_op_thread_pool. + // + // This option is deprecated. The same effect can be achieved by setting + // session_inter_op_thread_pool to have one element, whose num_threads equals + // inter_op_parallelism_threads. 
+ UsePerSessionThreads bool `protobuf:"varint,9,opt,name=use_per_session_threads,json=usePerSessionThreads,proto3" json:"use_per_session_threads,omitempty"` + // This option is experimental - it may be replaced with a different mechanism + // in the future. + // + // Configures session thread pools. If this is configured, then RunOptions for + // a Run call can select the thread pool to use. + // + // The intended use is for when some session invocations need to run in a + // background pool limited to a small number of threads: + // - For example, a session may be configured to have one large pool (for + // regular compute) and one small pool (for periodic, low priority work); + // using the small pool is currently the mechanism for limiting the inter-op + // parallelism of the low priority work. Note that it does not limit the + // parallelism of work spawned by a single op kernel implementation. + // - Using this setting is normally not needed in training, but may help some + // serving use cases. + // - It is also generally recommended to set the global_name field of this + // proto, to avoid creating multiple large pools. It is typically better to + // run the non-low-priority work, even across sessions, in a single large + // pool. + SessionInterOpThreadPool []*ThreadPoolOptionProto `protobuf:"bytes,12,rep,name=session_inter_op_thread_pool,json=sessionInterOpThreadPool,proto3" json:"session_inter_op_thread_pool,omitempty"` + // Assignment of Nodes to Devices is recomputed every placement_period + // steps until the system warms up (at which point the recomputation + // typically slows down automatically). + PlacementPeriod int32 `protobuf:"varint,3,opt,name=placement_period,json=placementPeriod,proto3" json:"placement_period,omitempty"` + // When any filters are present sessions will ignore all devices which do not + // match the filters. Each filter can be partially specified, e.g. "/job:ps" + // "/job:worker/replica:3", etc. 
+ DeviceFilters []string `protobuf:"bytes,4,rep,name=device_filters,json=deviceFilters,proto3" json:"device_filters,omitempty"` + // Options that apply to all GPUs. + GpuOptions *GPUOptions `protobuf:"bytes,6,opt,name=gpu_options,json=gpuOptions,proto3" json:"gpu_options,omitempty"` + // Whether soft placement is allowed. If allow_soft_placement is true, + // an op will be placed on CPU if + // 1. there's no GPU implementation for the OP + // or + // 2. no GPU devices are known or registered + // or + // 3. need to co-locate with reftype input(s) which are from CPU. + AllowSoftPlacement bool `protobuf:"varint,7,opt,name=allow_soft_placement,json=allowSoftPlacement,proto3" json:"allow_soft_placement,omitempty"` + // Whether device placements should be logged. + LogDevicePlacement bool `protobuf:"varint,8,opt,name=log_device_placement,json=logDevicePlacement,proto3" json:"log_device_placement,omitempty"` + // Options that apply to all graphs. + GraphOptions *GraphOptions `protobuf:"bytes,10,opt,name=graph_options,json=graphOptions,proto3" json:"graph_options,omitempty"` + // Global timeout for all blocking operations in this session. If non-zero, + // and not overridden on a per-operation basis, this value will be used as the + // deadline for all blocking operations. + OperationTimeoutInMs int64 `protobuf:"varint,11,opt,name=operation_timeout_in_ms,json=operationTimeoutInMs,proto3" json:"operation_timeout_in_ms,omitempty"` + // Options that apply when this session uses the distributed runtime. + RpcOptions *RPCOptions `protobuf:"bytes,13,opt,name=rpc_options,json=rpcOptions,proto3" json:"rpc_options,omitempty"` + // Optional list of all workers to use in this session. + ClusterDef *ClusterDef `protobuf:"bytes,14,opt,name=cluster_def,json=clusterDef,proto3" json:"cluster_def,omitempty"` + // If true, any resources such as Variables used in the session will not be + // shared with other sessions. 
However, when clusterspec propagation is + // enabled, this field is ignored and sessions are always isolated. + IsolateSessionState bool `protobuf:"varint,15,opt,name=isolate_session_state,json=isolateSessionState,proto3" json:"isolate_session_state,omitempty"` + Experimental *ConfigProto_Experimental `protobuf:"bytes,16,opt,name=experimental,proto3" json:"experimental,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigProto) Reset() { *m = ConfigProto{} } +func (m *ConfigProto) String() string { return proto.CompactTextString(m) } +func (*ConfigProto) ProtoMessage() {} +func (*ConfigProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{6} +} + +func (m *ConfigProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigProto.Unmarshal(m, b) +} +func (m *ConfigProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigProto.Marshal(b, m, deterministic) +} +func (m *ConfigProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigProto.Merge(m, src) +} +func (m *ConfigProto) XXX_Size() int { + return xxx_messageInfo_ConfigProto.Size(m) +} +func (m *ConfigProto) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigProto proto.InternalMessageInfo + +func (m *ConfigProto) GetDeviceCount() map[string]int32 { + if m != nil { + return m.DeviceCount + } + return nil +} + +func (m *ConfigProto) GetIntraOpParallelismThreads() int32 { + if m != nil { + return m.IntraOpParallelismThreads + } + return 0 +} + +func (m *ConfigProto) GetInterOpParallelismThreads() int32 { + if m != nil { + return m.InterOpParallelismThreads + } + return 0 +} + +func (m *ConfigProto) GetUsePerSessionThreads() bool { + if m != nil { + return m.UsePerSessionThreads + } + return false +} + +func (m *ConfigProto) GetSessionInterOpThreadPool() []*ThreadPoolOptionProto { + if m 
!= nil { + return m.SessionInterOpThreadPool + } + return nil +} + +func (m *ConfigProto) GetPlacementPeriod() int32 { + if m != nil { + return m.PlacementPeriod + } + return 0 +} + +func (m *ConfigProto) GetDeviceFilters() []string { + if m != nil { + return m.DeviceFilters + } + return nil +} + +func (m *ConfigProto) GetGpuOptions() *GPUOptions { + if m != nil { + return m.GpuOptions + } + return nil +} + +func (m *ConfigProto) GetAllowSoftPlacement() bool { + if m != nil { + return m.AllowSoftPlacement + } + return false +} + +func (m *ConfigProto) GetLogDevicePlacement() bool { + if m != nil { + return m.LogDevicePlacement + } + return false +} + +func (m *ConfigProto) GetGraphOptions() *GraphOptions { + if m != nil { + return m.GraphOptions + } + return nil +} + +func (m *ConfigProto) GetOperationTimeoutInMs() int64 { + if m != nil { + return m.OperationTimeoutInMs + } + return 0 +} + +func (m *ConfigProto) GetRpcOptions() *RPCOptions { + if m != nil { + return m.RpcOptions + } + return nil +} + +func (m *ConfigProto) GetClusterDef() *ClusterDef { + if m != nil { + return m.ClusterDef + } + return nil +} + +func (m *ConfigProto) GetIsolateSessionState() bool { + if m != nil { + return m.IsolateSessionState + } + return false +} + +func (m *ConfigProto) GetExperimental() *ConfigProto_Experimental { + if m != nil { + return m.Experimental + } + return nil +} + +// Everything inside Experimental is subject to change and is not subject +// to API stability guarantees in +// https://www.tensorflow.org/guide/version_compat. +type ConfigProto_Experimental struct { + // Task name for group resolution. 
+ CollectiveGroupLeader string `protobuf:"bytes,1,opt,name=collective_group_leader,json=collectiveGroupLeader,proto3" json:"collective_group_leader,omitempty"` + // Which executor to use, the default executor will be used + // if it is an empty string or "DEFAULT" + ExecutorType string `protobuf:"bytes,3,opt,name=executor_type,json=executorType,proto3" json:"executor_type,omitempty"` + // Guidance to formatting of large RecvBuf fields for transfer. + // Any positive value sets the max chunk size. 0 defaults to 4096. + // Any negative value indicates no max, i.e. one chunk only. + RecvBufMaxChunk int32 `protobuf:"varint,4,opt,name=recv_buf_max_chunk,json=recvBufMaxChunk,proto3" json:"recv_buf_max_chunk,omitempty"` + // If true, and supported by the platform, the runtime will attempt to + // use NUMA affinity where applicable. One consequence will be the + // existence of as many CPU devices as there are available NUMA nodes. + UseNumaAffinity bool `protobuf:"varint,5,opt,name=use_numa_affinity,json=useNumaAffinity,proto3" json:"use_numa_affinity,omitempty"` + // If true, make collective op execution order sequential and deterministic + // for potentially concurrent collective instances. + CollectiveDeterministicSequentialExecution bool `protobuf:"varint,6,opt,name=collective_deterministic_sequential_execution,json=collectiveDeterministicSequentialExecution,proto3" json:"collective_deterministic_sequential_execution,omitempty"` + // If true, use NCCL for CollectiveOps. This feature is highly + // experimental. + CollectiveNccl bool `protobuf:"varint,7,opt,name=collective_nccl,json=collectiveNccl,proto3" json:"collective_nccl,omitempty"` + // In the following, session state means the value of a variable, elements + // in a hash table, or any other resource, accessible by worker sessions + // held by a TF server. 
+ // + // When ClusterSpec propagation is enabled, the value of + // isolate_session_state is ignored when deciding whether to share session + // states in a TF server (for backwards compatibility reasons). + // - If share_session_state_in_clusterspec_propagation is true, the session + // states are shared. + // - If share_session_state_in_clusterspec_propagation is false, session + // states are isolated. + // + // When clusterspec propagation is not used, the value of + // share_session_state_in_clusterspec_propagation is ignored when deciding + // whether to share session states in a TF server. + // - If isolate_session_state is true, session states are isolated. + // - If isolate_session_state is false, session states are shared. + // + // TODO(b/129330037): Add a single API that consistently treats + // isolate_session_state and ClusterSpec propagation. + ShareSessionStateInClusterspecPropagation bool `protobuf:"varint,8,opt,name=share_session_state_in_clusterspec_propagation,json=shareSessionStateInClusterspecPropagation,proto3" json:"share_session_state_in_clusterspec_propagation,omitempty"` + // If using a direct session, disable spinning while waiting for work in + // the thread pool. This may result in higher latency for completing ops, + // but in the case where there is a lot of spinning may result in lower + // CPU usage. + DisableThreadSpinning bool `protobuf:"varint,9,opt,name=disable_thread_spinning,json=disableThreadSpinning,proto3" json:"disable_thread_spinning,omitempty"` + // When true, WorkerSessions are created with device attributes from the + // full cluster. + // This is helpful when a worker wants to partition a graph + // (for example during a PartitionedCallOp). + ShareClusterDevicesInSession bool `protobuf:"varint,10,opt,name=share_cluster_devices_in_session,json=shareClusterDevicesInSession,proto3" json:"share_cluster_devices_in_session,omitempty"` + // Metadata about the session. 
+ // + // If set, this can be used by the runtime and the Ops for debugging, + // monitoring, etc. + // + // NOTE: This is currently used and propagated only by the direct session. + SessionMetadata *SessionMetadata `protobuf:"bytes,11,opt,name=session_metadata,json=sessionMetadata,proto3" json:"session_metadata,omitempty"` + // If true, the session may treat the graph as being static for optimization + // purposes. + // + // If this option is set to true when a session is created, the full + // GraphDef must be passed in a single call to Session::Create(), and + // Session::Extend() may not be supported. + OptimizeForStaticGraph bool `protobuf:"varint,12,opt,name=optimize_for_static_graph,json=optimizeForStaticGraph,proto3" json:"optimize_for_static_graph,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigProto_Experimental) Reset() { *m = ConfigProto_Experimental{} } +func (m *ConfigProto_Experimental) String() string { return proto.CompactTextString(m) } +func (*ConfigProto_Experimental) ProtoMessage() {} +func (*ConfigProto_Experimental) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{6, 1} +} + +func (m *ConfigProto_Experimental) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigProto_Experimental.Unmarshal(m, b) +} +func (m *ConfigProto_Experimental) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigProto_Experimental.Marshal(b, m, deterministic) +} +func (m *ConfigProto_Experimental) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigProto_Experimental.Merge(m, src) +} +func (m *ConfigProto_Experimental) XXX_Size() int { + return xxx_messageInfo_ConfigProto_Experimental.Size(m) +} +func (m *ConfigProto_Experimental) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigProto_Experimental.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigProto_Experimental proto.InternalMessageInfo + 
+func (m *ConfigProto_Experimental) GetCollectiveGroupLeader() string { + if m != nil { + return m.CollectiveGroupLeader + } + return "" +} + +func (m *ConfigProto_Experimental) GetExecutorType() string { + if m != nil { + return m.ExecutorType + } + return "" +} + +func (m *ConfigProto_Experimental) GetRecvBufMaxChunk() int32 { + if m != nil { + return m.RecvBufMaxChunk + } + return 0 +} + +func (m *ConfigProto_Experimental) GetUseNumaAffinity() bool { + if m != nil { + return m.UseNumaAffinity + } + return false +} + +func (m *ConfigProto_Experimental) GetCollectiveDeterministicSequentialExecution() bool { + if m != nil { + return m.CollectiveDeterministicSequentialExecution + } + return false +} + +func (m *ConfigProto_Experimental) GetCollectiveNccl() bool { + if m != nil { + return m.CollectiveNccl + } + return false +} + +func (m *ConfigProto_Experimental) GetShareSessionStateInClusterspecPropagation() bool { + if m != nil { + return m.ShareSessionStateInClusterspecPropagation + } + return false +} + +func (m *ConfigProto_Experimental) GetDisableThreadSpinning() bool { + if m != nil { + return m.DisableThreadSpinning + } + return false +} + +func (m *ConfigProto_Experimental) GetShareClusterDevicesInSession() bool { + if m != nil { + return m.ShareClusterDevicesInSession + } + return false +} + +func (m *ConfigProto_Experimental) GetSessionMetadata() *SessionMetadata { + if m != nil { + return m.SessionMetadata + } + return nil +} + +func (m *ConfigProto_Experimental) GetOptimizeForStaticGraph() bool { + if m != nil { + return m.OptimizeForStaticGraph + } + return false +} + +// Options for a single Run() call. +type RunOptions struct { + TraceLevel RunOptions_TraceLevel `protobuf:"varint,1,opt,name=trace_level,json=traceLevel,proto3,enum=tensorflow.RunOptions_TraceLevel" json:"trace_level,omitempty"` + // Time to wait for operation to complete in milliseconds. 
+ TimeoutInMs int64 `protobuf:"varint,2,opt,name=timeout_in_ms,json=timeoutInMs,proto3" json:"timeout_in_ms,omitempty"` + // The thread pool to use, if session_inter_op_thread_pool is configured. + // To use the caller thread set this to -1 - this uses the caller thread + // to execute Session::Run() and thus avoids a context switch. Using the + // caller thread to execute Session::Run() should be done ONLY for simple + // graphs, where the overhead of an additional context switch is + // comparable with the overhead of Session::Run(). + InterOpThreadPool int32 `protobuf:"varint,3,opt,name=inter_op_thread_pool,json=interOpThreadPool,proto3" json:"inter_op_thread_pool,omitempty"` + // Whether the partition graph(s) executed by the executor(s) should be + // outputted via RunMetadata. + OutputPartitionGraphs bool `protobuf:"varint,5,opt,name=output_partition_graphs,json=outputPartitionGraphs,proto3" json:"output_partition_graphs,omitempty"` + // EXPERIMENTAL. Options used to initialize DebuggerState, if enabled. + DebugOptions *DebugOptions `protobuf:"bytes,6,opt,name=debug_options,json=debugOptions,proto3" json:"debug_options,omitempty"` + // When enabled, causes tensor allocation information to be included in + // the error message when the Run() call fails because the allocator ran + // out of memory (OOM). + // + // Enabling this option can slow down the Run() call. 
+ ReportTensorAllocationsUponOom bool `protobuf:"varint,7,opt,name=report_tensor_allocations_upon_oom,json=reportTensorAllocationsUponOom,proto3" json:"report_tensor_allocations_upon_oom,omitempty"` + Experimental *RunOptions_Experimental `protobuf:"bytes,8,opt,name=experimental,proto3" json:"experimental,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunOptions) Reset() { *m = RunOptions{} } +func (m *RunOptions) String() string { return proto.CompactTextString(m) } +func (*RunOptions) ProtoMessage() {} +func (*RunOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{7} +} + +func (m *RunOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunOptions.Unmarshal(m, b) +} +func (m *RunOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunOptions.Marshal(b, m, deterministic) +} +func (m *RunOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunOptions.Merge(m, src) +} +func (m *RunOptions) XXX_Size() int { + return xxx_messageInfo_RunOptions.Size(m) +} +func (m *RunOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunOptions proto.InternalMessageInfo + +func (m *RunOptions) GetTraceLevel() RunOptions_TraceLevel { + if m != nil { + return m.TraceLevel + } + return RunOptions_NO_TRACE +} + +func (m *RunOptions) GetTimeoutInMs() int64 { + if m != nil { + return m.TimeoutInMs + } + return 0 +} + +func (m *RunOptions) GetInterOpThreadPool() int32 { + if m != nil { + return m.InterOpThreadPool + } + return 0 +} + +func (m *RunOptions) GetOutputPartitionGraphs() bool { + if m != nil { + return m.OutputPartitionGraphs + } + return false +} + +func (m *RunOptions) GetDebugOptions() *DebugOptions { + if m != nil { + return m.DebugOptions + } + return nil +} + +func (m *RunOptions) GetReportTensorAllocationsUponOom() bool { + if m != nil 
{ + return m.ReportTensorAllocationsUponOom + } + return false +} + +func (m *RunOptions) GetExperimental() *RunOptions_Experimental { + if m != nil { + return m.Experimental + } + return nil +} + +// Everything inside Experimental is subject to change and is not subject +// to API stability guarantees in +// https://www.tensorflow.org/guide/version_compat. +type RunOptions_Experimental struct { + // If non-zero, declares that this graph is going to use collective + // ops and must synchronize step_ids with any other graph with this + // same group_key value (in a distributed computation where tasks + // run disjoint graphs). + CollectiveGraphKey int64 `protobuf:"varint,1,opt,name=collective_graph_key,json=collectiveGraphKey,proto3" json:"collective_graph_key,omitempty"` + // If true, then operations (using the inter-op pool) across all + // session::run() calls will be centrally scheduled, optimizing for (median + // and tail) latency. + // Consider using this option for CPU-bound workloads like inference. 
+ UseRunHandlerPool bool `protobuf:"varint,2,opt,name=use_run_handler_pool,json=useRunHandlerPool,proto3" json:"use_run_handler_pool,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunOptions_Experimental) Reset() { *m = RunOptions_Experimental{} } +func (m *RunOptions_Experimental) String() string { return proto.CompactTextString(m) } +func (*RunOptions_Experimental) ProtoMessage() {} +func (*RunOptions_Experimental) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{7, 0} +} + +func (m *RunOptions_Experimental) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunOptions_Experimental.Unmarshal(m, b) +} +func (m *RunOptions_Experimental) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunOptions_Experimental.Marshal(b, m, deterministic) +} +func (m *RunOptions_Experimental) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunOptions_Experimental.Merge(m, src) +} +func (m *RunOptions_Experimental) XXX_Size() int { + return xxx_messageInfo_RunOptions_Experimental.Size(m) +} +func (m *RunOptions_Experimental) XXX_DiscardUnknown() { + xxx_messageInfo_RunOptions_Experimental.DiscardUnknown(m) +} + +var xxx_messageInfo_RunOptions_Experimental proto.InternalMessageInfo + +func (m *RunOptions_Experimental) GetCollectiveGraphKey() int64 { + if m != nil { + return m.CollectiveGraphKey + } + return 0 +} + +func (m *RunOptions_Experimental) GetUseRunHandlerPool() bool { + if m != nil { + return m.UseRunHandlerPool + } + return false +} + +// Metadata output (i.e., non-Tensor) for a single Run() call. +type RunMetadata struct { + // Statistics traced for this step. Populated if tracing is turned on via the + // "RunOptions" proto. + // EXPERIMENTAL: The format and set of events may change in future versions. 
+ StepStats *framework.StepStats `protobuf:"bytes,1,opt,name=step_stats,json=stepStats,proto3" json:"step_stats,omitempty"` + // The cost graph for the computation defined by the run call. + CostGraph *framework.CostGraphDef `protobuf:"bytes,2,opt,name=cost_graph,json=costGraph,proto3" json:"cost_graph,omitempty"` + // Graphs of the partitions executed by executors. + PartitionGraphs []*framework.GraphDef `protobuf:"bytes,3,rep,name=partition_graphs,json=partitionGraphs,proto3" json:"partition_graphs,omitempty"` + // This is only populated for graphs that are run as functions in TensorFlow + // V2. There will be an entry below for each function that is traced. + // The main use cases of the post_optimization_graph and the partition_graphs + // is to give the caller insight into the graphs that were actually run by the + // runtime. Additional information (such as those in step_stats) will match + // these graphs. + // We also include the pre_optimization_graph since it is usually easier to + // read, and is helpful in situations where the caller wants to get a high + // level idea of what the built graph looks like (since the various graph + // optimization passes might change the structure of the graph significantly). 
+ FunctionGraphs []*RunMetadata_FunctionGraphs `protobuf:"bytes,4,rep,name=function_graphs,json=functionGraphs,proto3" json:"function_graphs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunMetadata) Reset() { *m = RunMetadata{} } +func (m *RunMetadata) String() string { return proto.CompactTextString(m) } +func (*RunMetadata) ProtoMessage() {} +func (*RunMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{8} +} + +func (m *RunMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunMetadata.Unmarshal(m, b) +} +func (m *RunMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunMetadata.Marshal(b, m, deterministic) +} +func (m *RunMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunMetadata.Merge(m, src) +} +func (m *RunMetadata) XXX_Size() int { + return xxx_messageInfo_RunMetadata.Size(m) +} +func (m *RunMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_RunMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_RunMetadata proto.InternalMessageInfo + +func (m *RunMetadata) GetStepStats() *framework.StepStats { + if m != nil { + return m.StepStats + } + return nil +} + +func (m *RunMetadata) GetCostGraph() *framework.CostGraphDef { + if m != nil { + return m.CostGraph + } + return nil +} + +func (m *RunMetadata) GetPartitionGraphs() []*framework.GraphDef { + if m != nil { + return m.PartitionGraphs + } + return nil +} + +func (m *RunMetadata) GetFunctionGraphs() []*RunMetadata_FunctionGraphs { + if m != nil { + return m.FunctionGraphs + } + return nil +} + +type RunMetadata_FunctionGraphs struct { + // TODO(nareshmodi): Include some sort of function/cache-key identifier? 
+ PartitionGraphs []*framework.GraphDef `protobuf:"bytes,1,rep,name=partition_graphs,json=partitionGraphs,proto3" json:"partition_graphs,omitempty"` + PreOptimizationGraph *framework.GraphDef `protobuf:"bytes,2,opt,name=pre_optimization_graph,json=preOptimizationGraph,proto3" json:"pre_optimization_graph,omitempty"` + PostOptimizationGraph *framework.GraphDef `protobuf:"bytes,3,opt,name=post_optimization_graph,json=postOptimizationGraph,proto3" json:"post_optimization_graph,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunMetadata_FunctionGraphs) Reset() { *m = RunMetadata_FunctionGraphs{} } +func (m *RunMetadata_FunctionGraphs) String() string { return proto.CompactTextString(m) } +func (*RunMetadata_FunctionGraphs) ProtoMessage() {} +func (*RunMetadata_FunctionGraphs) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{8, 0} +} + +func (m *RunMetadata_FunctionGraphs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunMetadata_FunctionGraphs.Unmarshal(m, b) +} +func (m *RunMetadata_FunctionGraphs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunMetadata_FunctionGraphs.Marshal(b, m, deterministic) +} +func (m *RunMetadata_FunctionGraphs) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunMetadata_FunctionGraphs.Merge(m, src) +} +func (m *RunMetadata_FunctionGraphs) XXX_Size() int { + return xxx_messageInfo_RunMetadata_FunctionGraphs.Size(m) +} +func (m *RunMetadata_FunctionGraphs) XXX_DiscardUnknown() { + xxx_messageInfo_RunMetadata_FunctionGraphs.DiscardUnknown(m) +} + +var xxx_messageInfo_RunMetadata_FunctionGraphs proto.InternalMessageInfo + +func (m *RunMetadata_FunctionGraphs) GetPartitionGraphs() []*framework.GraphDef { + if m != nil { + return m.PartitionGraphs + } + return nil +} + +func (m *RunMetadata_FunctionGraphs) GetPreOptimizationGraph() *framework.GraphDef { + if m != nil { + 
return m.PreOptimizationGraph + } + return nil +} + +func (m *RunMetadata_FunctionGraphs) GetPostOptimizationGraph() *framework.GraphDef { + if m != nil { + return m.PostOptimizationGraph + } + return nil +} + +// Defines a connection between two tensors in a `GraphDef`. +type TensorConnection struct { + // A tensor name. The value of this tensor will be substituted for + // the tensor named in `to_tensor`. + FromTensor string `protobuf:"bytes,1,opt,name=from_tensor,json=fromTensor,proto3" json:"from_tensor,omitempty"` + // A tensor name. The value of this tensor will be bound to the + // value of the tensor named in `from_tensor`. + ToTensor string `protobuf:"bytes,2,opt,name=to_tensor,json=toTensor,proto3" json:"to_tensor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorConnection) Reset() { *m = TensorConnection{} } +func (m *TensorConnection) String() string { return proto.CompactTextString(m) } +func (*TensorConnection) ProtoMessage() {} +func (*TensorConnection) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{9} +} + +func (m *TensorConnection) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorConnection.Unmarshal(m, b) +} +func (m *TensorConnection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorConnection.Marshal(b, m, deterministic) +} +func (m *TensorConnection) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorConnection.Merge(m, src) +} +func (m *TensorConnection) XXX_Size() int { + return xxx_messageInfo_TensorConnection.Size(m) +} +func (m *TensorConnection) XXX_DiscardUnknown() { + xxx_messageInfo_TensorConnection.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorConnection proto.InternalMessageInfo + +func (m *TensorConnection) GetFromTensor() string { + if m != nil { + return m.FromTensor + } + return "" +} + +func (m *TensorConnection) GetToTensor() string { + 
if m != nil { + return m.ToTensor + } + return "" +} + +// Defines a subgraph in another `GraphDef` as a set of feed points and nodes +// to be fetched or executed. +// +// Compare with the arguments to `Session::Run()`. +type CallableOptions struct { + // Tensors to be fed in the callable. Each feed is the name of a tensor. + Feed []string `protobuf:"bytes,1,rep,name=feed,proto3" json:"feed,omitempty"` + // Fetches. A list of tensor names. The caller of the callable expects a + // tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The + // order of specified fetches does not change the execution order. + Fetch []string `protobuf:"bytes,2,rep,name=fetch,proto3" json:"fetch,omitempty"` + // Target Nodes. A list of node names. The named nodes will be run by the + // callable but their outputs will not be returned. + Target []string `protobuf:"bytes,3,rep,name=target,proto3" json:"target,omitempty"` + // Options that will be applied to each run. + RunOptions *RunOptions `protobuf:"bytes,4,opt,name=run_options,json=runOptions,proto3" json:"run_options,omitempty"` + // Tensors to be connected in the callable. Each TensorConnection denotes + // a pair of tensors in the graph, between which an edge will be created + // in the callable. + TensorConnection []*TensorConnection `protobuf:"bytes,5,rep,name=tensor_connection,json=tensorConnection,proto3" json:"tensor_connection,omitempty"` + // The Tensor objects fed in the callable and fetched from the callable + // are expected to be backed by host (CPU) memory by default. + // + // The options below allow changing that - feeding tensors backed by + // device memory, or returning tensors that are backed by device memory. + // + // The maps below map the name of a feed/fetch tensor (which appears in + // 'feed' or 'fetch' fields above), to the fully qualified name of the device + // owning the memory backing the contents of the tensor. 
+ // + // For example, creating a callable with the following options: + // + // CallableOptions { + // feed: "a:0" + // feed: "b:0" + // + // fetch: "x:0" + // fetch: "y:0" + // + // feed_devices: { + // "a:0": "/job:localhost/replica:0/task:0/device:GPU:0" + // } + // + // fetch_devices: { + // "y:0": "/job:localhost/replica:0/task:0/device:GPU:0" + // } + // } + // + // means that the Callable expects: + // - The first argument ("a:0") is a Tensor backed by GPU memory. + // - The second argument ("b:0") is a Tensor backed by host memory. + // and of its return values: + // - The first output ("x:0") will be backed by host memory. + // - The second output ("y:0") will be backed by GPU memory. + // + // FEEDS: + // It is the responsibility of the caller to ensure that the memory of the fed + // tensors will be correctly initialized and synchronized before it is + // accessed by operations executed during the call to Session::RunCallable(). + // + // This is typically ensured by using the TensorFlow memory allocators + // (Device::GetAllocator()) to create the Tensor to be fed. + // + // Alternatively, for CUDA-enabled GPU devices, this typically means that the + // operation that produced the contents of the tensor has completed, i.e., the + // CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or + // cuStreamSynchronize()). + FeedDevices map[string]string `protobuf:"bytes,6,rep,name=feed_devices,json=feedDevices,proto3" json:"feed_devices,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + FetchDevices map[string]string `protobuf:"bytes,7,rep,name=fetch_devices,json=fetchDevices,proto3" json:"fetch_devices,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // By default, RunCallable() will synchronize the GPU stream before returning + // fetched tensors on a GPU device, to ensure that the values in those tensors + // have been produced. 
This simplifies interacting with the tensors, but + // potentially incurs a performance hit. + // + // If this options is set to true, the caller is responsible for ensuring + // that the values in the fetched tensors have been produced before they are + // used. The caller can do this by invoking `Device::Sync()` on the underlying + // device(s), or by feeding the tensors back to the same Session using + // `feed_devices` with the same corresponding device name. + FetchSkipSync bool `protobuf:"varint,8,opt,name=fetch_skip_sync,json=fetchSkipSync,proto3" json:"fetch_skip_sync,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CallableOptions) Reset() { *m = CallableOptions{} } +func (m *CallableOptions) String() string { return proto.CompactTextString(m) } +func (*CallableOptions) ProtoMessage() {} +func (*CallableOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e2349c44c118036b, []int{10} +} + +func (m *CallableOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CallableOptions.Unmarshal(m, b) +} +func (m *CallableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CallableOptions.Marshal(b, m, deterministic) +} +func (m *CallableOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_CallableOptions.Merge(m, src) +} +func (m *CallableOptions) XXX_Size() int { + return xxx_messageInfo_CallableOptions.Size(m) +} +func (m *CallableOptions) XXX_DiscardUnknown() { + xxx_messageInfo_CallableOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_CallableOptions proto.InternalMessageInfo + +func (m *CallableOptions) GetFeed() []string { + if m != nil { + return m.Feed + } + return nil +} + +func (m *CallableOptions) GetFetch() []string { + if m != nil { + return m.Fetch + } + return nil +} + +func (m *CallableOptions) GetTarget() []string { + if m != nil { + return m.Target + } + return nil +} + +func (m 
*CallableOptions) GetRunOptions() *RunOptions { + if m != nil { + return m.RunOptions + } + return nil +} + +func (m *CallableOptions) GetTensorConnection() []*TensorConnection { + if m != nil { + return m.TensorConnection + } + return nil +} + +func (m *CallableOptions) GetFeedDevices() map[string]string { + if m != nil { + return m.FeedDevices + } + return nil +} + +func (m *CallableOptions) GetFetchDevices() map[string]string { + if m != nil { + return m.FetchDevices + } + return nil +} + +func (m *CallableOptions) GetFetchSkipSync() bool { + if m != nil { + return m.FetchSkipSync + } + return false +} + +func init() { + proto.RegisterEnum("tensorflow.OptimizerOptions_Level", OptimizerOptions_Level_name, OptimizerOptions_Level_value) + proto.RegisterEnum("tensorflow.OptimizerOptions_GlobalJitLevel", OptimizerOptions_GlobalJitLevel_name, OptimizerOptions_GlobalJitLevel_value) + proto.RegisterEnum("tensorflow.RunOptions_TraceLevel", RunOptions_TraceLevel_name, RunOptions_TraceLevel_value) + proto.RegisterType((*GPUOptions)(nil), "tensorflow.GPUOptions") + proto.RegisterType((*GPUOptions_Experimental)(nil), "tensorflow.GPUOptions.Experimental") + proto.RegisterType((*GPUOptions_Experimental_VirtualDevices)(nil), "tensorflow.GPUOptions.Experimental.VirtualDevices") + proto.RegisterType((*OptimizerOptions)(nil), "tensorflow.OptimizerOptions") + proto.RegisterType((*GraphOptions)(nil), "tensorflow.GraphOptions") + proto.RegisterType((*ThreadPoolOptionProto)(nil), "tensorflow.ThreadPoolOptionProto") + proto.RegisterType((*RPCOptions)(nil), "tensorflow.RPCOptions") + proto.RegisterType((*SessionMetadata)(nil), "tensorflow.SessionMetadata") + proto.RegisterType((*ConfigProto)(nil), "tensorflow.ConfigProto") + proto.RegisterMapType((map[string]int32)(nil), "tensorflow.ConfigProto.DeviceCountEntry") + proto.RegisterType((*ConfigProto_Experimental)(nil), "tensorflow.ConfigProto.Experimental") + proto.RegisterType((*RunOptions)(nil), "tensorflow.RunOptions") + 
proto.RegisterType((*RunOptions_Experimental)(nil), "tensorflow.RunOptions.Experimental") + proto.RegisterType((*RunMetadata)(nil), "tensorflow.RunMetadata") + proto.RegisterType((*RunMetadata_FunctionGraphs)(nil), "tensorflow.RunMetadata.FunctionGraphs") + proto.RegisterType((*TensorConnection)(nil), "tensorflow.TensorConnection") + proto.RegisterType((*CallableOptions)(nil), "tensorflow.CallableOptions") + proto.RegisterMapType((map[string]string)(nil), "tensorflow.CallableOptions.FeedDevicesEntry") + proto.RegisterMapType((map[string]string)(nil), "tensorflow.CallableOptions.FetchDevicesEntry") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/config.proto", fileDescriptor_e2349c44c118036b) +} + +var fileDescriptor_e2349c44c118036b = []byte{ + // 2734 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x59, 0xdd, 0x76, 0xdb, 0xc6, + 0x11, 0x0e, 0x45, 0xd2, 0xa6, 0x86, 0x34, 0x49, 0xad, 0x25, 0x8b, 0x91, 0x7d, 0x1a, 0x85, 0x89, + 0x5d, 0xe5, 0x8f, 0x76, 0x94, 0xc4, 0x4e, 0x9b, 0x26, 0x3e, 0x32, 0x65, 0x2a, 0x72, 0x24, 0x8b, + 0x67, 0x29, 0xb7, 0xa7, 0xed, 0x05, 0xce, 0x0a, 0x58, 0x50, 0xa8, 0x80, 0x5d, 0x64, 0x77, 0x21, + 0x5b, 0xb9, 0xec, 0x13, 0xf4, 0x25, 0x7a, 0xd1, 0x9b, 0x3c, 0x41, 0x9f, 0xa0, 0xaf, 0xd2, 0x17, + 0xe8, 0x45, 0x4f, 0xdb, 0xb3, 0x3f, 0x20, 0x40, 0x4a, 0x76, 0x1a, 0xdf, 0x18, 0x98, 0x6f, 0x66, + 0xb0, 0x3b, 0x3b, 0xf3, 0xcd, 0x2c, 0x05, 0x77, 0x15, 0x65, 0x92, 0x8b, 0x30, 0xe6, 0x2f, 0xef, + 0xfb, 0x5c, 0xd0, 0xfb, 0xa9, 0xe0, 0x8a, 0x9f, 0x64, 0xe1, 0x7d, 0x9f, 0xb3, 0x30, 0x9a, 0x0e, + 0xcc, 0x3b, 0x82, 0x42, 0x6d, 0xe3, 0xc3, 0x45, 0x93, 0x50, 0x90, 0x84, 0xbe, 0xe4, 0xe2, 0xec, + 0xbe, 0xcf, 0xa5, 0xf2, 0xa6, 0x82, 0xa4, 0xa7, 0xd6, 0x6e, 0xe3, 0xee, 0xeb, 0x75, 0xcb, 0x6a, + 0x6f, 0x70, 0x29, 0x15, 0x4d, 0x3d, 0xa9, 0x88, 0x92, 0x4e, 0xf7, 0xde, 0xeb, 0x57, 0x1c, 0x67, + 0x52, 0x51, 0xe1, 0xf4, 0xde, 0x7f, 0xad, 0x5e, 0x40, 0x4f, 0x32, 0xb7, 0xb1, 0x8d, 0xc1, 0x6b, + 
0xb5, 0x04, 0x7d, 0x29, 0x22, 0x45, 0x85, 0x57, 0x0e, 0x44, 0xff, 0xc7, 0x06, 0xc0, 0xde, 0xf8, + 0xc5, 0x51, 0xaa, 0x22, 0xce, 0x24, 0xda, 0x85, 0x77, 0x52, 0x2a, 0xbc, 0x54, 0x70, 0x9f, 0x4a, + 0xe9, 0x4d, 0xd3, 0xcc, 0x4b, 0x68, 0xc2, 0xc5, 0x85, 0x17, 0x0a, 0xe2, 0x6b, 0x9d, 0x5e, 0x65, + 0xb3, 0xb2, 0x55, 0xc1, 0xb7, 0x53, 0x2a, 0xc6, 0x56, 0x6b, 0x2f, 0xcd, 0x0e, 0x8d, 0xce, 0xc8, + 0xa9, 0xa0, 0x77, 0xa1, 0x45, 0xe2, 0x98, 0xbf, 0xf4, 0xa6, 0x82, 0xbf, 0x54, 0xa7, 0xbd, 0xda, + 0x66, 0x65, 0xab, 0x81, 0x9b, 0x46, 0xb6, 0x67, 0x44, 0xe8, 0x2e, 0xb4, 0xf5, 0xab, 0x4f, 0x14, + 0x17, 0x9e, 0xba, 0x48, 0x69, 0x6f, 0x69, 0xb3, 0xb2, 0xb5, 0x8c, 0x6f, 0xcc, 0xa4, 0xc7, 0x17, + 0x29, 0x45, 0x0f, 0x61, 0x3d, 0xa0, 0x21, 0x15, 0x82, 0x06, 0x5e, 0x40, 0x63, 0xaa, 0xdd, 0x7b, + 0x27, 0x17, 0x8a, 0xca, 0x5e, 0x75, 0xb3, 0xb2, 0x55, 0xc5, 0x6b, 0x39, 0xbc, 0xeb, 0xd0, 0x27, + 0x1a, 0x44, 0x03, 0xb8, 0x79, 0x1e, 0xc9, 0xe8, 0x24, 0xa6, 0x5e, 0x40, 0xcf, 0x23, 0x9f, 0x7a, + 0x71, 0x24, 0x55, 0xaf, 0x6e, 0xbe, 0xb1, 0xe2, 0xa0, 0x5d, 0x83, 0x1c, 0x44, 0x52, 0xa1, 0xaf, + 0x60, 0x23, 0xe5, 0x71, 0x1c, 0xb1, 0xa9, 0xa7, 0xf7, 0x70, 0xae, 0xcd, 0x62, 0x72, 0xe1, 0x65, + 0x92, 0xfa, 0xb2, 0x77, 0x6d, 0xb3, 0xb2, 0x55, 0xc7, 0xeb, 0x4e, 0x63, 0xc7, 0x28, 0xec, 0x6a, + 0xfc, 0x85, 0x86, 0xd1, 0x63, 0xb8, 0x93, 0x1b, 0x47, 0x6c, 0xce, 0x3c, 0x31, 0xe6, 0xd7, 0x8d, + 0xf9, 0xdb, 0x4e, 0x67, 0xdf, 0xa9, 0x18, 0x07, 0x87, 0xc6, 0xc1, 0x03, 0x58, 0x0d, 0xb9, 0xf0, + 0xa9, 0x89, 0xb7, 0xcf, 0x93, 0x94, 0x28, 0xbd, 0xbe, 0x5e, 0xc3, 0xc4, 0x0d, 0x19, 0x6c, 0x2f, + 0xcd, 0x86, 0x33, 0x04, 0xed, 0x41, 0x8b, 0xbe, 0x4a, 0xa9, 0x88, 0x12, 0xca, 0x14, 0x89, 0x7b, + 0xcb, 0x9b, 0x95, 0xad, 0xe6, 0xf6, 0x7b, 0xa5, 0xd3, 0x1f, 0x14, 0xa7, 0x3a, 0x78, 0x5a, 0x52, + 0xc5, 0x73, 0x86, 0x1b, 0x3f, 0xd6, 0xa0, 0x55, 0x86, 0xd1, 0x1f, 0xa1, 0x73, 0x1e, 0x09, 0x95, + 0x91, 0xd8, 0x45, 0x4e, 0xf6, 0x2a, 0x9b, 0xd5, 0xad, 0xe6, 0xf6, 0xf6, 0xff, 0xe1, 0x7c, 0xf0, + 0x5b, 0x6b, 0x6a, 0x23, 0x2b, 0x71, 
0xfb, 0x7c, 0xee, 0x1d, 0x7d, 0x0c, 0x28, 0x93, 0xd4, 0xcb, + 0x58, 0x14, 0x46, 0x34, 0x70, 0xa9, 0x65, 0x4e, 0xbe, 0x81, 0xbb, 0x99, 0xa4, 0x2f, 0x2c, 0x60, + 0xd3, 0x09, 0x7d, 0x05, 0xb7, 0x59, 0x96, 0xe8, 0x65, 0x78, 0x8a, 0x9b, 0xff, 0x7c, 0x9e, 0x5e, + 0x78, 0x52, 0x09, 0x4a, 0x12, 0x9b, 0x00, 0x75, 0x7c, 0x8b, 0x65, 0xc9, 0x2e, 0x3d, 0x3f, 0xe6, + 0xbb, 0xf4, 0x7c, 0xc8, 0xd3, 0x8b, 0x89, 0x45, 0xd1, 0x36, 0xac, 0xf9, 0x3c, 0x8e, 0xa9, 0x3d, + 0x0e, 0xa1, 0x0f, 0x87, 0x8b, 0x80, 0x0a, 0x93, 0x8c, 0xcb, 0xf8, 0x66, 0x01, 0xe2, 0x88, 0x4d, + 0x8f, 0x34, 0x84, 0x3e, 0x83, 0x35, 0x15, 0x25, 0x54, 0x2a, 0x92, 0xa4, 0x34, 0xf0, 0x66, 0xa9, + 0x68, 0xf2, 0xa6, 0x81, 0x57, 0x4b, 0xe0, 0x4e, 0x8e, 0xa1, 0xaf, 0xe1, 0xf6, 0x19, 0x15, 0x8c, + 0xc6, 0x9e, 0x12, 0xc4, 0x3f, 0xa3, 0xc2, 0x4b, 0xc8, 0x2b, 0x2f, 0x62, 0x8a, 0x8a, 0x73, 0x12, + 0xbb, 0xc3, 0xef, 0x59, 0x95, 0x63, 0xab, 0x71, 0x48, 0x5e, 0xed, 0x3b, 0x1c, 0x3d, 0x82, 0xde, + 0x15, 0xe6, 0x36, 0xc5, 0x1b, 0xc6, 0x76, 0x6d, 0xd1, 0xd6, 0xa6, 0xf8, 0x57, 0xb0, 0x71, 0x85, + 0x61, 0x4a, 0x59, 0x10, 0xb1, 0xa9, 0x49, 0x88, 0x3a, 0x5e, 0x5f, 0x34, 0x1d, 0x5b, 0x78, 0xe3, + 0x4b, 0x68, 0xcf, 0x1f, 0x15, 0xba, 0x07, 0x1d, 0x57, 0xe9, 0x71, 0x94, 0x44, 0xca, 0x4b, 0x4e, + 0xcc, 0xb9, 0x2f, 0xe1, 0x1b, 0x56, 0x7c, 0xa0, 0xa5, 0x87, 0x27, 0xfd, 0x3f, 0xd7, 0xa0, 0xab, + 0x8f, 0x3e, 0x89, 0x7e, 0xa0, 0x22, 0xa7, 0x8d, 0x43, 0x78, 0x2f, 0xe0, 0x3a, 0x73, 0x13, 0xce, + 0x3c, 0x99, 0x9d, 0xd0, 0x57, 0xa9, 0xa0, 0x52, 0xea, 0x62, 0xa5, 0xda, 0x1d, 0x23, 0x33, 0xea, + 0x68, 0xe0, 0xcd, 0x80, 0x0f, 0x8d, 0xe6, 0xa4, 0xac, 0xf8, 0xb4, 0xd0, 0xd3, 0xd5, 0x6b, 0xdc, + 0x31, 0xa9, 0x08, 0x53, 0x5e, 0xc8, 0x63, 0xb3, 0x27, 0x9b, 0x27, 0x2b, 0xda, 0xdc, 0x22, 0x23, + 0x0b, 0xa0, 0x6f, 0xe0, 0x8e, 0xde, 0xbb, 0xd6, 0xa3, 0x41, 0x61, 0x17, 0xe5, 0x54, 0x71, 0xcd, + 0x50, 0x45, 0x2f, 0x21, 0xaf, 0x46, 0x46, 0x25, 0xb7, 0xdf, 0x77, 0x6c, 0xf1, 0x00, 0x56, 0x03, + 0xee, 0x85, 0x19, 0x33, 0xf4, 0xe5, 0x45, 0x2c, 0x8e, 0x98, 0xfe, 0xa0, 
0xe5, 0x2d, 0x14, 0xf0, + 0x91, 0x83, 0xf6, 0x1d, 0x82, 0x1e, 0xc3, 0x32, 0x4f, 0x95, 0x17, 0xd3, 0x73, 0x1a, 0x9b, 0x44, + 0x6c, 0x6f, 0xf7, 0xcb, 0xf5, 0xb1, 0x18, 0xa1, 0xc1, 0x81, 0xd6, 0xc4, 0x0d, 0x9e, 0x2a, 0xf3, + 0x84, 0x5e, 0x40, 0x77, 0x1a, 0xf3, 0x13, 0x12, 0x7b, 0x7f, 0x8a, 0x72, 0x3f, 0x75, 0xe3, 0xe7, + 0xa3, 0x37, 0xfa, 0xd9, 0x33, 0x46, 0xcf, 0x22, 0xeb, 0x06, 0xb7, 0xa7, 0x73, 0xef, 0xfd, 0x4d, + 0xa8, 0x5b, 0xff, 0xd7, 0x60, 0xe9, 0xe0, 0xd3, 0xee, 0x5b, 0xa8, 0x03, 0x4b, 0x07, 0x0f, 0xba, + 0xff, 0xcd, 0xff, 0x55, 0xfa, 0x43, 0x68, 0xcf, 0xfb, 0x40, 0x4d, 0xb8, 0xbe, 0xfb, 0x74, 0xb4, + 0xf3, 0xe2, 0xe0, 0xb8, 0xfb, 0x16, 0xea, 0x42, 0xf5, 0x68, 0x34, 0x2a, 0x19, 0xa0, 0x06, 0xd4, + 0x8e, 0x9e, 0x7b, 0x9f, 0x76, 0xf3, 0xa7, 0xed, 0xee, 0x52, 0xff, 0x2f, 0x35, 0x68, 0xed, 0xe9, + 0x7e, 0x97, 0x27, 0xc0, 0xe7, 0x70, 0x8b, 0x32, 0xa2, 0xe9, 0x56, 0x50, 0xff, 0xdc, 0x93, 0xfe, + 0x29, 0x0d, 0xb2, 0xb8, 0x38, 0xb4, 0x55, 0x8b, 0x62, 0xea, 0x9f, 0x4f, 0x66, 0x18, 0xda, 0x87, + 0x15, 0x9e, 0x6f, 0xd0, 0xe3, 0xd6, 0x95, 0x89, 0x66, 0x73, 0xfb, 0xce, 0x9b, 0xa2, 0x80, 0xbb, + 0x7c, 0x31, 0x03, 0xb7, 0xa0, 0x7b, 0x92, 0x45, 0xb1, 0x3e, 0x7d, 0xa9, 0xbc, 0x84, 0x07, 0x34, + 0x36, 0xc7, 0x57, 0xc5, 0x6d, 0x23, 0x1f, 0x72, 0xa9, 0x0e, 0xb5, 0x14, 0x7d, 0x06, 0xb7, 0x16, + 0x35, 0x3d, 0x12, 0x2a, 0x2a, 0x4c, 0xcd, 0x54, 0xf1, 0xcd, 0x79, 0xfd, 0x1d, 0x0d, 0xe9, 0x8e, + 0x16, 0xb1, 0x90, 0x0a, 0x4f, 0x9e, 0x92, 0x94, 0x4a, 0x47, 0x08, 0x4d, 0x23, 0x9b, 0x18, 0x91, + 0xe6, 0xb6, 0x34, 0x26, 0x3e, 0xf5, 0x52, 0x91, 0x31, 0x1a, 0xd8, 0xb1, 0xc1, 0xa4, 0x5e, 0x03, + 0x77, 0x0d, 0x32, 0x36, 0x80, 0x89, 0x1b, 0xfa, 0x12, 0x7a, 0x2e, 0x60, 0x27, 0x61, 0xcc, 0x89, + 0xfa, 0xf4, 0xa1, 0x27, 0x29, 0x0b, 0x74, 0xf4, 0x0c, 0x65, 0x34, 0xb0, 0x0b, 0xe8, 0x13, 0x07, + 0x4f, 0x1c, 0x8a, 0xde, 0x83, 0x1b, 0x9a, 0x87, 0xe2, 0x88, 0x51, 0x4f, 0x0f, 0x13, 0x8e, 0x25, + 0x5a, 0xb9, 0x70, 0xa2, 0x68, 0x8a, 0x86, 0xd0, 0x71, 0xfd, 0x7e, 0x16, 0x57, 0x30, 0x71, 0x2d, + 0x0f, 0x08, 
0x03, 0xec, 0x46, 0x82, 0xa1, 0x99, 0x08, 0x70, 0xdb, 0x99, 0xb8, 0x98, 0x3e, 0xab, + 0x35, 0x2a, 0xdd, 0x25, 0x7c, 0x57, 0x9e, 0x45, 0xe9, 0x4f, 0xd6, 0x76, 0xff, 0xf7, 0xb0, 0x76, + 0x7c, 0x2a, 0x28, 0x09, 0xc6, 0x9c, 0xc7, 0xd6, 0xc3, 0xd8, 0x8c, 0x5a, 0xef, 0x40, 0x53, 0xb3, + 0xb8, 0x32, 0xa0, 0x34, 0x1c, 0x50, 0xc7, 0xc0, 0xb2, 0xc4, 0xaa, 0x4b, 0xad, 0xe0, 0x4a, 0x81, + 0x91, 0x24, 0x9f, 0x03, 0xc0, 0x8a, 0x9e, 0x93, 0x84, 0xf6, 0xff, 0xb6, 0x04, 0x80, 0xc7, 0xc3, + 0xfc, 0xa8, 0xbf, 0x81, 0x3b, 0xba, 0x89, 0x88, 0xd4, 0xf7, 0x42, 0x2e, 0xbc, 0x88, 0xe5, 0xd3, + 0x4a, 0x42, 0xf4, 0xb8, 0xe4, 0x58, 0xa6, 0x97, 0x49, 0x8a, 0x53, 0x7f, 0xc4, 0xc5, 0x7e, 0xae, + 0x70, 0x68, 0x70, 0xcd, 0xf2, 0xba, 0xc7, 0xe6, 0x9b, 0x20, 0xf1, 0x94, 0x8b, 0x48, 0x9d, 0x26, + 0xee, 0xcb, 0xab, 0x25, 0x70, 0x27, 0xc7, 0xd0, 0x47, 0xb0, 0x52, 0x36, 0x2a, 0x0a, 0xbf, 0x8e, + 0xbb, 0x25, 0xc0, 0x56, 0xd4, 0xc7, 0x80, 0x7c, 0xe2, 0x9f, 0xda, 0x35, 0x0a, 0x2a, 0x53, 0xce, + 0x24, 0x75, 0x6c, 0xd2, 0x35, 0x08, 0x4e, 0x7d, 0xec, 0xe4, 0xe8, 0x3b, 0xe8, 0x07, 0x91, 0x34, + 0xb9, 0x20, 0x9d, 0x7b, 0x9f, 0x33, 0x46, 0x2d, 0x19, 0xc9, 0x53, 0xa2, 0x9b, 0x97, 0xcb, 0xb8, + 0x77, 0x9c, 0xe6, 0xc4, 0x2a, 0x0e, 0x67, 0x7a, 0x13, 0xab, 0xd6, 0x7f, 0x0c, 0x1d, 0x87, 0x1d, + 0x52, 0x45, 0x02, 0xa2, 0x08, 0x42, 0x50, 0x33, 0x81, 0xad, 0x98, 0xed, 0x99, 0x67, 0xd4, 0x83, + 0xeb, 0xe7, 0x54, 0x68, 0x35, 0xb3, 0xeb, 0x2a, 0xce, 0x5f, 0xfb, 0xff, 0xb8, 0x01, 0x4d, 0x9b, + 0x0f, 0xf6, 0xf8, 0xbe, 0x83, 0x96, 0x9b, 0xa0, 0x7c, 0x9e, 0x31, 0xe5, 0x86, 0x81, 0xad, 0x72, + 0x1a, 0x95, 0xd4, 0x07, 0xb6, 0x9d, 0x0c, 0xb5, 0xea, 0x53, 0xa6, 0xc4, 0x05, 0x6e, 0x06, 0x85, + 0x44, 0x4f, 0x4a, 0x11, 0x53, 0x82, 0x78, 0x3c, 0xf5, 0x52, 0x22, 0x48, 0x1c, 0xd3, 0x38, 0x92, + 0x45, 0x72, 0x2c, 0xd9, 0x49, 0xc9, 0xe8, 0x1c, 0xa5, 0xe3, 0x42, 0x23, 0xcf, 0x15, 0xeb, 0xc0, + 0xb0, 0xc5, 0x95, 0x0e, 0xea, 0x33, 0x07, 0x9a, 0x1a, 0xae, 0x70, 0xf0, 0x05, 0xac, 0xeb, 0xe4, + 0xd1, 0x43, 0x6e, 0x1e, 0xec, 0xdc, 0x76, 0xd9, 
0x32, 0x55, 0x26, 0xe9, 0x98, 0x0a, 0x17, 0xc4, + 0xdc, 0x8c, 0xc0, 0x9d, 0x5c, 0x7d, 0xf6, 0x7d, 0x6b, 0xe7, 0xa5, 0x9c, 0xc7, 0xbd, 0x96, 0x89, + 0xca, 0xbb, 0xe5, 0xa8, 0x5c, 0x59, 0x0d, 0xb8, 0xe7, 0xdc, 0xec, 0xdb, 0x15, 0x16, 0x4a, 0xe8, + 0x03, 0xb0, 0x2c, 0xa1, 0x27, 0x29, 0xbd, 0xbe, 0x88, 0x07, 0x2e, 0xc1, 0x3a, 0x33, 0xf9, 0xd8, + 0x88, 0xf5, 0xf0, 0xec, 0xce, 0x24, 0x8c, 0x62, 0x45, 0x85, 0xec, 0xd5, 0x36, 0xab, 0x7a, 0x78, + 0xb6, 0xd2, 0x91, 0x15, 0xa2, 0x47, 0xd0, 0xd4, 0x03, 0x65, 0x4e, 0x00, 0xd7, 0x0c, 0x01, 0xdc, + 0xba, 0x7a, 0x8c, 0xc3, 0x30, 0x4d, 0xb3, 0xbc, 0xc2, 0x1e, 0xc0, 0xaa, 0x9d, 0xdf, 0x25, 0x0f, + 0x95, 0x37, 0xfb, 0xba, 0x23, 0x26, 0x64, 0xb0, 0x09, 0x0f, 0xd5, 0x38, 0x47, 0xb4, 0x45, 0xcc, + 0xa7, 0xf9, 0xac, 0x5d, 0x58, 0xb8, 0x09, 0x36, 0xe6, 0x53, 0x9b, 0x18, 0x85, 0xc5, 0xd7, 0x70, + 0xc3, 0x30, 0xe4, 0x02, 0x3f, 0xf5, 0xe6, 0x96, 0x57, 0x6a, 0x31, 0xb8, 0x35, 0x2d, 0x37, 0x9c, + 0x2f, 0x60, 0x9d, 0xa7, 0x54, 0x18, 0xee, 0xf1, 0x34, 0xf5, 0xf1, 0xcc, 0x34, 0xfc, 0x44, 0xf6, + 0x9a, 0x26, 0xa1, 0x57, 0x67, 0xf0, 0xb1, 0x45, 0xf7, 0xd9, 0xa1, 0x09, 0x89, 0xae, 0xc9, 0xfc, + 0x9b, 0x37, 0x2e, 0x87, 0xa4, 0x20, 0x1a, 0x0c, 0x22, 0xf5, 0xf3, 0xef, 0x3d, 0x82, 0xa6, 0xbb, + 0x8e, 0x79, 0x01, 0x0d, 0x7b, 0xed, 0xcb, 0x86, 0x43, 0x0b, 0xef, 0xd2, 0x10, 0x83, 0x3f, 0x7b, + 0xd6, 0x73, 0x68, 0x24, 0x79, 0x4c, 0x54, 0x51, 0xdd, 0xfa, 0xf6, 0x47, 0x7b, 0x1d, 0x13, 0x9a, + 0x9b, 0x0e, 0x74, 0xf9, 0x36, 0xd1, 0x10, 0xfa, 0x76, 0x61, 0xba, 0xef, 0x9a, 0xaf, 0xbd, 0xff, + 0xba, 0x9a, 0x7b, 0xc3, 0x78, 0xff, 0x0d, 0x74, 0x17, 0x2b, 0x52, 0xb7, 0xf8, 0x33, 0x7a, 0xe1, + 0xe8, 0x40, 0x3f, 0xa2, 0x55, 0xa8, 0x9f, 0x93, 0x38, 0xa3, 0xae, 0xfe, 0xec, 0xcb, 0xaf, 0x97, + 0xbe, 0xac, 0x6c, 0xfc, 0xbd, 0xbe, 0x70, 0x3d, 0x78, 0x08, 0xeb, 0xa5, 0xb1, 0x7a, 0x2a, 0x78, + 0x96, 0x7a, 0x31, 0x25, 0x81, 0xe3, 0xdd, 0x65, 0x5c, 0x9a, 0xba, 0xf7, 0x34, 0x7a, 0x60, 0x40, + 0xdd, 0xb5, 0xe8, 0x2b, 0xea, 0x67, 0xb3, 0xeb, 0x5e, 0xd5, 0x68, 0xb7, 0x72, 0xa1, 
0xb9, 0xed, + 0x7d, 0x04, 0xc8, 0x8c, 0x0f, 0x27, 0x59, 0x68, 0x86, 0x59, 0xff, 0x34, 0x63, 0x67, 0x86, 0x37, + 0xeb, 0xb8, 0xa3, 0x91, 0x27, 0x59, 0x78, 0x48, 0x5e, 0x0d, 0xb5, 0x18, 0x7d, 0x08, 0x2b, 0xba, + 0x92, 0x59, 0x96, 0x10, 0x8f, 0x84, 0x61, 0xc4, 0x22, 0x75, 0xe1, 0x58, 0xb2, 0x93, 0x49, 0xfa, + 0x3c, 0x4b, 0xc8, 0x8e, 0x13, 0x23, 0x02, 0x9f, 0x94, 0x56, 0x1d, 0x50, 0x45, 0x45, 0x12, 0xb1, + 0x48, 0xaa, 0xc8, 0xf7, 0x24, 0xfd, 0x3e, 0xa3, 0x4c, 0x45, 0x24, 0xf6, 0xec, 0x62, 0x34, 0x29, + 0xda, 0xb6, 0xfd, 0x61, 0x61, 0xb4, 0x5b, 0xb6, 0x99, 0xcc, 0x4c, 0x9e, 0xe6, 0x16, 0xe8, 0x97, + 0xd0, 0x29, 0x7d, 0x82, 0xf9, 0x7e, 0xec, 0xca, 0xa5, 0x5d, 0x88, 0x9f, 0xfb, 0x7e, 0x8c, 0x08, + 0x0c, 0x34, 0xa7, 0x2f, 0xa4, 0x83, 0xce, 0x5d, 0x97, 0x35, 0x32, 0xa5, 0xbe, 0xbe, 0x81, 0xa7, + 0x64, 0x6a, 0xc7, 0x66, 0x5b, 0x44, 0x1f, 0x18, 0xab, 0x72, 0x9e, 0xec, 0xb3, 0x61, 0x61, 0x31, + 0x2e, 0x0c, 0xcc, 0xad, 0xd9, 0x75, 0x14, 0x47, 0x52, 0x32, 0x8d, 0x18, 0xcb, 0xef, 0x05, 0x0d, + 0xbc, 0xe6, 0x60, 0x4b, 0x3f, 0x13, 0x07, 0xa2, 0x11, 0x6c, 0xda, 0xa5, 0x15, 0xa9, 0x6e, 0x2e, + 0x07, 0x7a, 0x71, 0x6e, 0xb5, 0xa6, 0x4c, 0x1b, 0xf8, 0x8e, 0xd1, 0x9b, 0xa5, 0xbc, 0xd1, 0xda, + 0x67, 0x6e, 0x71, 0x68, 0x04, 0xdd, 0x7c, 0x73, 0x89, 0xeb, 0x42, 0xa6, 0x2a, 0x9b, 0xdb, 0xb7, + 0xcb, 0x39, 0xbc, 0xd0, 0xa8, 0x70, 0x47, 0x2e, 0x74, 0xae, 0x5f, 0xc1, 0xdb, 0xf9, 0xa0, 0x67, + 0x5a, 0xbd, 0x8e, 0x54, 0xe4, 0xbb, 0xc9, 0xaa, 0x65, 0xa7, 0xa4, 0x5c, 0x61, 0xc4, 0xc5, 0xc4, + 0xc0, 0x86, 0x34, 0x9e, 0xd5, 0x1a, 0x4b, 0xdd, 0x6a, 0xff, 0x3f, 0x35, 0x00, 0x9c, 0xb1, 0xbc, + 0x88, 0x9f, 0x40, 0x53, 0xdf, 0x95, 0xa8, 0x6b, 0xdf, 0x15, 0x33, 0x6f, 0xcf, 0x91, 0x76, 0xa1, + 0x3c, 0xd0, 0x97, 0x26, 0x6a, 0xa7, 0x6c, 0x50, 0xb3, 0x67, 0xd4, 0xb7, 0xe3, 0x57, 0x41, 0x37, + 0xb6, 0x7f, 0x36, 0x55, 0x89, 0x65, 0xee, 0xc3, 0xea, 0x95, 0x5d, 0xc2, 0xd2, 0xf9, 0x4a, 0x74, + 0x89, 0xfb, 0x1f, 0xc2, 0x3a, 0xcf, 0x54, 0x9a, 0x29, 0xdd, 0xd4, 0x54, 0x64, 0x48, 0xcd, 0xec, + 0x32, 0x9f, 0x34, 0xd7, 
0x2c, 0x3c, 0xce, 0x51, 0xb3, 0x49, 0xa9, 0x49, 0xd4, 0xfc, 0xf8, 0xb3, + 0xc0, 0xf1, 0x73, 0x24, 0xba, 0xab, 0x15, 0x66, 0x24, 0x1a, 0x94, 0xde, 0xd0, 0x33, 0xe8, 0x0b, + 0x9a, 0x72, 0xa1, 0x3c, 0xab, 0x9f, 0xdf, 0x78, 0x35, 0xe8, 0x65, 0x29, 0x67, 0x1e, 0xe7, 0x89, + 0x4b, 0xe3, 0x5f, 0x58, 0xcd, 0x63, 0xa3, 0xb8, 0x53, 0xe8, 0xbd, 0x48, 0x39, 0x3b, 0xe2, 0xc9, + 0xa5, 0x5f, 0x24, 0x1a, 0x97, 0x7f, 0x91, 0x28, 0x05, 0xf7, 0x0d, 0x94, 0xf5, 0xfd, 0x02, 0xe3, + 0x3c, 0x80, 0xd5, 0x39, 0xc6, 0xd1, 0x3d, 0x23, 0xe7, 0xaf, 0x2a, 0x46, 0x65, 0xba, 0x21, 0xe9, + 0xe9, 0x77, 0xf4, 0x42, 0x87, 0xdf, 0x0c, 0x88, 0x19, 0xf3, 0x4e, 0x09, 0x0b, 0x62, 0x2a, 0x6c, + 0xf8, 0xdd, 0xfd, 0x51, 0x0f, 0x86, 0x19, 0xfb, 0xd6, 0x22, 0x3a, 0xfc, 0x7d, 0x0c, 0x50, 0x9c, + 0x36, 0x6a, 0x41, 0xe3, 0xf9, 0x91, 0x77, 0x8c, 0x77, 0x86, 0x4f, 0xbb, 0x6f, 0x21, 0x04, 0xed, + 0xc9, 0xd1, 0xe8, 0xf8, 0x77, 0x3b, 0xf8, 0xa9, 0x93, 0x55, 0xb4, 0xec, 0xdb, 0x1d, 0xbc, 0x5b, + 0x92, 0x2d, 0xa1, 0x36, 0xc0, 0xe8, 0xc5, 0xc1, 0x81, 0x7b, 0xaf, 0x3e, 0xab, 0x35, 0x6a, 0xdd, + 0x7a, 0xff, 0xdf, 0x55, 0x68, 0xe2, 0xac, 0xc8, 0xe8, 0xcf, 0x01, 0x8a, 0x1f, 0x00, 0xcd, 0x16, + 0x9a, 0xdb, 0x6b, 0x73, 0x35, 0xa1, 0x68, 0xaa, 0xf3, 0x58, 0xe2, 0x65, 0x99, 0x3f, 0xa2, 0x47, + 0x00, 0xc5, 0x2f, 0x91, 0x66, 0x1b, 0x0b, 0x67, 0xac, 0x6f, 0x2b, 0x66, 0xfb, 0xba, 0xfb, 0x2c, + 0xfb, 0xf9, 0x1b, 0x7a, 0x0c, 0xdd, 0x4b, 0x09, 0x55, 0x35, 0xa3, 0xca, 0xea, 0xa5, 0x3e, 0xab, + 0x4d, 0x3b, 0xe9, 0x42, 0x82, 0x1d, 0x41, 0x67, 0x76, 0x2d, 0x76, 0xf6, 0x35, 0x63, 0x7f, 0x6f, + 0xe1, 0x60, 0xf3, 0x1d, 0x0e, 0xf2, 0xab, 0xb2, 0x75, 0x80, 0xdb, 0xe1, 0xdc, 0xfb, 0xc6, 0x3f, + 0x2b, 0xd0, 0x9e, 0x57, 0xb9, 0x72, 0x91, 0x95, 0x9f, 0xb3, 0xc8, 0x67, 0x70, 0x2b, 0x15, 0xf6, + 0xa2, 0x93, 0x44, 0x3f, 0x90, 0xc2, 0x8f, 0x0b, 0xd5, 0xd5, 0x6e, 0x56, 0x53, 0x41, 0x8f, 0x4a, + 0x26, 0x36, 0x62, 0x07, 0xb0, 0x9e, 0xea, 0x50, 0x5f, 0xe1, 0xac, 0xfa, 0x06, 0x67, 0x6b, 0xda, + 0xe8, 0x92, 0xb7, 0xfe, 0x18, 0xba, 0xb6, 0x60, 0x8a, 0x41, 
0x5d, 0x5f, 0x77, 0x42, 0xc1, 0x13, + 0x57, 0x72, 0xae, 0x6b, 0x82, 0x16, 0x59, 0x55, 0x74, 0x1b, 0x96, 0x15, 0xcf, 0x61, 0x7b, 0x27, + 0x69, 0x28, 0x6e, 0xc1, 0xfe, 0x5f, 0x6b, 0xd0, 0x19, 0x92, 0x38, 0xd6, 0xec, 0x9d, 0x97, 0x31, + 0x82, 0x5a, 0x48, 0x69, 0x60, 0x82, 0xb6, 0x8c, 0xcd, 0xb3, 0x6e, 0xe9, 0x21, 0x55, 0xbe, 0x0e, + 0x81, 0x16, 0xda, 0x17, 0x74, 0x0b, 0xae, 0x29, 0x22, 0xa6, 0x54, 0x99, 0x2c, 0x58, 0xc6, 0xee, + 0xcd, 0x8c, 0x45, 0x19, 0x9b, 0xb1, 0x48, 0xed, 0x8a, 0xb1, 0x68, 0x56, 0xbb, 0x18, 0x44, 0xc1, + 0xa8, 0xfb, 0xb0, 0xe2, 0xa8, 0xa3, 0xb8, 0xb2, 0xf4, 0xea, 0xe6, 0xf0, 0xe6, 0x6e, 0xf0, 0x8b, + 0x51, 0xc0, 0x5d, 0xb5, 0x18, 0x97, 0x23, 0x68, 0xe9, 0x95, 0xcf, 0x7e, 0x75, 0xbc, 0x66, 0xbc, + 0x7c, 0x3c, 0x97, 0xe6, 0xf3, 0x1b, 0x1f, 0x8c, 0x28, 0x0d, 0x5c, 0xf3, 0x71, 0x97, 0x8d, 0xb0, + 0x90, 0x20, 0x0c, 0x37, 0xcc, 0xae, 0x67, 0x1e, 0xaf, 0x1b, 0x8f, 0x9f, 0xbc, 0xd9, 0xa3, 0xf2, + 0x4f, 0xe7, 0x5c, 0xb6, 0xc2, 0x92, 0x08, 0xdd, 0x83, 0x8e, 0xf5, 0x69, 0x2e, 0xc5, 0xf2, 0x82, + 0xf9, 0xae, 0x3b, 0xdb, 0x4f, 0x4d, 0xce, 0xa2, 0x74, 0x72, 0xc1, 0x7c, 0x3d, 0x77, 0x2d, 0x2e, + 0xee, 0xa7, 0xe6, 0xae, 0xe5, 0xf2, 0xdc, 0xf5, 0x18, 0x56, 0x2e, 0x2d, 0xe5, 0xe7, 0x38, 0x78, + 0x72, 0x06, 0x3d, 0x2e, 0xa6, 0xe5, 0xad, 0xce, 0xfe, 0x04, 0xf1, 0xa4, 0x55, 0x1a, 0x1e, 0xe5, + 0xb8, 0xf2, 0x87, 0xdf, 0x4c, 0x23, 0x75, 0x9a, 0x9d, 0x0c, 0x7c, 0x9e, 0xdc, 0x2f, 0xfd, 0xf9, + 0xe0, 0xea, 0xc7, 0x29, 0x9f, 0xff, 0xbb, 0xc2, 0xbf, 0x2a, 0x95, 0x93, 0x6b, 0xe6, 0xe5, 0xb3, + 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xf1, 0xe6, 0x16, 0xac, 0x7d, 0x19, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/config.proto b/executor/proto/tensorflow/core/protobuf/config.proto new file mode 100644 index 0000000000..8096c5420c --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/config.proto @@ -0,0 +1,740 @@ +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; +option java_outer_classname = "ConfigProtos"; +option 
java_multiple_files = true; +option java_package = "org.tensorflow.framework"; + +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +import "tensorflow/core/framework/cost_graph.proto"; +import "tensorflow/core/framework/graph.proto"; +import "tensorflow/core/framework/step_stats.proto"; +import "tensorflow/core/protobuf/cluster.proto"; +import "tensorflow/core/protobuf/debug.proto"; +import "tensorflow/core/protobuf/rewriter_config.proto"; + +message GPUOptions { + // Fraction of the available GPU memory to allocate for each process. + // 1 means to allocate all of the GPU memory, 0.5 means the process + // allocates up to ~50% of the available GPU memory. + // + // GPU memory is pre-allocated unless the allow_growth option is enabled. + // + // If greater than 1.0, uses CUDA unified memory to potentially oversubscribe + // the amount of memory available on the GPU device by using host memory as a + // swap space. Accessing memory not available on the device will be + // significantly slower as that would require memory transfer between the host + // and the device. Options to reduce the memory requirement should be + // considered before enabling this option as this may come with a negative + // performance impact. Oversubscription using the unified memory requires + // Pascal class or newer GPUs and it is currently only supported on the Linux + // operating system. See + // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements + // for the detailed requirements. + double per_process_gpu_memory_fraction = 1; + + // If true, the allocator does not pre-allocate the entire specified + // GPU memory region, instead starting small and growing as needed. + bool allow_growth = 4; + + // The type of GPU allocation strategy to use. + // + // Allowed values: + // "": The empty string (default) uses a system-chosen default + // which may change over time. 
+ // + // "BFC": A "Best-fit with coalescing" algorithm, simplified from a + // version of dlmalloc. + string allocator_type = 2; + + // Delay deletion of up to this many bytes to reduce the number of + // interactions with gpu driver code. If 0, the system chooses + // a reasonable default (several MBs). + int64 deferred_deletion_bytes = 3; + + // A comma-separated list of GPU ids that determines the 'visible' + // to 'virtual' mapping of GPU devices. For example, if TensorFlow + // can see 8 GPU devices in the process, and one wanted to map + // visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1", + // then one would specify this field as "5,3". This field is similar in + // spirit to the CUDA_VISIBLE_DEVICES environment variable, except + // it applies to the visible GPU devices in the process. + // + // NOTE: + // 1. The GPU driver provides the process with the visible GPUs + // in an order which is not guaranteed to have any correlation to + // the *physical* GPU id in the machine. This field is used for + // remapping "visible" to "virtual", which means this operates only + // after the process starts. Users are required to use vendor + // specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the + // physical to visible device mapping prior to invoking TensorFlow. + // 2. In the code, the ids in this list are also called "platform GPU id"s, + // and the 'virtual' ids of GPU devices (i.e. the ids in the device + // name "/device:GPU:") are also called "TF GPU id"s. Please + // refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h + // for more information. + string visible_device_list = 5; + + // In the event polling loop sleep this many microseconds between + // PollEvents calls, when the queue is not empty. If value is not + // set or set to 0, gets set to a non-zero default. + int32 polling_active_delay_usecs = 6; + + // This field is deprecated and ignored. 
+ int32 polling_inactive_delay_msecs = 7; + + // Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow, + // enabling this option forces all CPU tensors to be allocated with Cuda + // pinned memory. Normally, TensorFlow will infer which tensors should be + // allocated as the pinned memory. But in case where the inference is + // incomplete, this option can significantly speed up the cross-device memory + // copy performance as long as it fits the memory. + // Note that this option is not something that should be + // enabled by default for unknown or very large models, since all Cuda pinned + // memory is unpageable, having too much pinned memory might negatively impact + // the overall host system performance. + bool force_gpu_compatible = 8; + + message Experimental { + // Configuration for breaking down a visible GPU into multiple "virtual" + // devices. + message VirtualDevices { + // Per "virtual" device memory limit, in MB. The number of elements in + // the list is the number of virtual devices to create on the + // corresponding visible GPU (see "virtual_devices" below). + // If empty, it will create single virtual device taking all available + // memory from the device. + // + // For the concept of "visible" and "virtual" GPU, see the comments for + // "visible_device_list" above for more information. + repeated float memory_limit_mb = 1; + } + + // The multi virtual device settings. If empty (not set), it will create + // single virtual device on each visible GPU, according to the settings + // in "visible_device_list" above. Otherwise, the number of elements in the + // list must be the same as the number of visible GPUs (after + // "visible_device_list" filtering if it is set), and the string represented + // device names (e.g. 
/device:GPU:) will refer to the virtual + // devices and have the field assigned sequentially starting from 0, + // according to the order they appear in this list and the "memory_limit" + // list inside each element. For example, + // visible_device_list = "1,0" + // virtual_devices { memory_limit: 1GB memory_limit: 2GB } + // virtual_devices {} + // will create three virtual devices as: + // /device:GPU:0 -> visible GPU 1 with 1GB memory + // /device:GPU:1 -> visible GPU 1 with 2GB memory + // /device:GPU:2 -> visible GPU 0 with all available memory + // + // NOTE: + // 1. It's invalid to set both this and "per_process_gpu_memory_fraction" + // at the same time. + // 2. Currently this setting is per-process, not per-session. Using + // different settings in different sessions within same process will + // result in undefined behavior. + repeated VirtualDevices virtual_devices = 1; + + // If true, uses CUDA unified memory for memory allocations. If + // per_process_gpu_memory_fraction option is greater than 1.0, then unified + // memory is used regardless of the value for this field. See comments for + // per_process_gpu_memory_fraction field for more details and requirements + // of the unified memory. This option is useful to oversubscribe memory if + // multiple processes are sharing a single GPU while individually using less + // than 1.0 per process memory fraction. + bool use_unified_memory = 2; + + // If > 1, the number of device-to-device copy streams to create + // for each GPUDevice. Default value is 0, which is automatically + // converted to 1. + int32 num_dev_to_dev_copy_streams = 3; + + // If non-empty, defines a good GPU ring order on a single worker based on + // device interconnect. This assumes that all workers have the same GPU + // topology. Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4". 
+ // This ring order is used by the RingReducer implementation of + // CollectiveReduce, and serves as an override to automatic ring order + // generation in OrderTaskDeviceMap() during CollectiveParam resolution. + string collective_ring_order = 4; + + // If true then extra work is done by GPUDevice and GPUBFCAllocator to + // keep track of when GPU memory is freed and when kernels actually + // complete so that we can know when a nominally free memory chunk + // is really not subject to pending use. + bool timestamped_allocator = 5; + + // reserved id: 6 + + // Parameters for GPUKernelTracker. By default no kernel tracking is done. + // Note that timestamped_allocator is only effective if some tracking is + // specified. + // + // If kernel_tracker_max_interval = n > 0, then a tracking event + // is inserted after every n kernels without an event. + int32 kernel_tracker_max_interval = 7; + // If kernel_tracker_max_bytes = n > 0, then a tracking event is + // inserted after every series of kernels allocating a sum of + // memory >= n. If one kernel allocates b * n bytes, then one + // event will be inserted after it, but it will count as b against + // the pending limit. + int32 kernel_tracker_max_bytes = 8; + // If kernel_tracker_max_pending > 0 then no more than this many + // tracking events can be outstanding at a time. An attempt to + // launch an additional kernel will stall until an event + // completes. + int32 kernel_tracker_max_pending = 9; + } + + // Everything inside experimental is subject to change and is not subject + // to API stability guarantees in + // https://www.tensorflow.org/guide/version_compat. + Experimental experimental = 9; +} + +// Options passed to the graph optimizer +message OptimizerOptions { + // If true, optimize the graph using common subexpression elimination. + bool do_common_subexpression_elimination = 1; + + // If true, perform constant folding optimization on the graph. 
+ bool do_constant_folding = 2; + + // Constant folding optimization replaces tensors whose values can be + // predetermined, with constant nodes. To avoid inserting too large constants, + // the size of each constant created can be limited. If this value is zero, a + // default limit of 10 MiB will be applied. If constant folding optimization + // is disabled, this value is ignored. + int64 max_folded_constant_in_bytes = 6; + + // If true, perform function inlining on the graph. + bool do_function_inlining = 4; + + // Optimization level + enum Level { + // L1 is the default level. + // Optimization performed at L1 : + // 1. Common subexpression elimination + // 2. Constant folding + L1 = 0; + + // No optimizations + L0 = -1; + } + + // Overall optimization level. The actual optimizations applied will be the + // logical OR of the flags that this level implies and any flags already set. + Level opt_level = 3; + + // Control the use of the compiler/jit. Experimental. + enum GlobalJitLevel { + DEFAULT = 0; // Default setting ("off" now, but later expected to be "on") + OFF = -1; + // The following settings turn on compilation, with higher values being + // more aggressive. Higher values may reduce opportunities for parallelism + // and may use more memory. (At present, there is no distinction, but this + // is expected to change.) + ON_1 = 1; + ON_2 = 2; + } + GlobalJitLevel global_jit_level = 5; +} + +message GraphOptions { + // Removed, use optimizer_options below. + reserved "skip_common_subexpression_elimination"; + reserved 1; + + // If true, use control flow to schedule the activation of Recv nodes. + // (Currently ignored.) + bool enable_recv_scheduling = 2; + + // Options controlling how graph is optimized. + OptimizerOptions optimizer_options = 3; + + // The number of steps to run before returning a cost model detailing + // the memory usage and performance of each node of the graph. 0 means + // no cost model. 
+ int64 build_cost_model = 4; + + // The number of steps to skip before collecting statistics for the + // cost model. + int64 build_cost_model_after = 9; + + // Annotate each Node with Op output shape data, to the extent it can + // be statically inferred. + bool infer_shapes = 5; + + // Only place the subgraphs that are run, rather than the entire graph. + // + // This is useful for interactive graph building, where one might + // produce graphs that cannot be placed during the debugging + // process. In particular, it allows the client to continue work in + // a session after adding a node to a graph whose placement + // constraints are unsatisfiable. + bool place_pruned_graph = 6; + + // If true, transfer float values between processes as bfloat16. + bool enable_bfloat16_sendrecv = 7; + + // If > 0, record a timeline every this many steps. + // EXPERIMENTAL: This currently has no effect in MasterSession. + int32 timeline_step = 8; + + // Options that control the type and amount of graph rewriting. + // Not currently configurable via the public Python API (i.e. there is no API + // stability guarantee if you import RewriterConfig explicitly). + RewriterConfig rewrite_options = 10; +} + +message ThreadPoolOptionProto { + // The number of threads in the pool. + // + // 0 means the system picks a value based on where this option proto is used + // (see the declaration of the specific field for more info). + int32 num_threads = 1; + + // The global name of the threadpool. + // + // If empty, then the threadpool is made and used according to the scope it's + // in - e.g., for a session threadpool, it is used by that session only. + // + // If non-empty, then: + // - a global threadpool associated with this name is looked + // up or created. This allows, for example, sharing one threadpool across + // many sessions (e.g., like the default behavior, if + // inter_op_parallelism_threads is not configured), but still partitioning + // into a large and small pool. 
+ // - if the threadpool for this global_name already exists, then it is an + // error if the existing pool was created using a different num_threads + // value as is specified on this call. + // - threadpools created this way are never garbage collected. + string global_name = 2; +} + +message RPCOptions { + // If true, always use RPC to contact the session target. + // + // If false (the default option), TensorFlow may use an optimized + // transport for client-master communication that avoids the RPC + // stack. This option is primarily for used testing the RPC stack. + bool use_rpc_for_inprocess_master = 1; + + // The compression algorithm to be used. One of "deflate", "gzip". + string compression_algorithm = 2; + + // If compression_algorithm is set, the compression level to be used. + // From 0 (no compression), up to 3. + int32 compression_level = 3; + + // Setting cache_rpc_response to true will enable sender side caching of + // response for RecvTensorAsync and RecvBufAsync to allow receiver to retry + // requests . This is only necessary when the network fabric is experiencing a + // significant error rate. Without it we'll fail a step on an network error, + // while with it we'll be able to complete long steps (like complex + // initializations) in the face of some network errors during RecvTensor. + bool cache_rpc_response = 4; + + // Disables TCP connection sharing when opening a new RPC channel. + bool disable_session_connection_sharing = 5; +} + +// Metadata about the session. +// +// This can be used by the runtime and the Ops for debugging, monitoring, etc. +// +// The (name, version) tuple is expected to be a unique identifier for +// sessions within the same process. +// +// NOTE: This is currently used and propagated only by the direct session. +message SessionMetadata { + string name = 1; + + // The version is optional. If set, needs to be >= 0. + int64 version = 2; +} + +// Session configuration parameters. 
+// The system picks appropriate values for fields that are not set. +message ConfigProto { + // Map from device type name (e.g., "CPU" or "GPU" ) to maximum + // number of devices of that type to use. If a particular device + // type is not found in the map, the system picks an appropriate + // number. + map device_count = 1; + + // The execution of an individual op (for some op types) can be + // parallelized on a pool of intra_op_parallelism_threads. + // 0 means the system picks an appropriate number. + int32 intra_op_parallelism_threads = 2; + + // Nodes that perform blocking operations are enqueued on a pool of + // inter_op_parallelism_threads available in each process. + // + // 0 means the system picks an appropriate number. + // Negative means all operations are performed in caller's thread. + // + // Note that the first Session created in the process sets the + // number of threads for all future sessions unless use_per_session_threads is + // true or session_inter_op_thread_pool is configured. + int32 inter_op_parallelism_threads = 5; + + // If true, use a new set of threads for this session rather than the global + // pool of threads. Only supported by direct sessions. + // + // If false, use the global threads created by the first session, or the + // per-session thread pools configured by session_inter_op_thread_pool. + // + // This option is deprecated. The same effect can be achieved by setting + // session_inter_op_thread_pool to have one element, whose num_threads equals + // inter_op_parallelism_threads. + bool use_per_session_threads = 9; + + // This option is experimental - it may be replaced with a different mechanism + // in the future. + // + // Configures session thread pools. If this is configured, then RunOptions for + // a Run call can select the thread pool to use. 
+ // + // The intended use is for when some session invocations need to run in a + // background pool limited to a small number of threads: + // - For example, a session may be configured to have one large pool (for + // regular compute) and one small pool (for periodic, low priority work); + // using the small pool is currently the mechanism for limiting the inter-op + // parallelism of the low priority work. Note that it does not limit the + // parallelism of work spawned by a single op kernel implementation. + // - Using this setting is normally not needed in training, but may help some + // serving use cases. + // - It is also generally recommended to set the global_name field of this + // proto, to avoid creating multiple large pools. It is typically better to + // run the non-low-priority work, even across sessions, in a single large + // pool. + repeated ThreadPoolOptionProto session_inter_op_thread_pool = 12; + + // Assignment of Nodes to Devices is recomputed every placement_period + // steps until the system warms up (at which point the recomputation + // typically slows down automatically). + int32 placement_period = 3; + + // When any filters are present sessions will ignore all devices which do not + // match the filters. Each filter can be partially specified, e.g. "/job:ps" + // "/job:worker/replica:3", etc. + repeated string device_filters = 4; + + // Options that apply to all GPUs. + GPUOptions gpu_options = 6; + + // Whether soft placement is allowed. If allow_soft_placement is true, + // an op will be placed on CPU if + // 1. there's no GPU implementation for the OP + // or + // 2. no GPU devices are known or registered + // or + // 3. need to co-locate with reftype input(s) which are from CPU. + bool allow_soft_placement = 7; + + // Whether device placements should be logged. + bool log_device_placement = 8; + + // Options that apply to all graphs. 
+ GraphOptions graph_options = 10; + + // Global timeout for all blocking operations in this session. If non-zero, + // and not overridden on a per-operation basis, this value will be used as the + // deadline for all blocking operations. + int64 operation_timeout_in_ms = 11; + + // Options that apply when this session uses the distributed runtime. + RPCOptions rpc_options = 13; + + // Optional list of all workers to use in this session. + ClusterDef cluster_def = 14; + + // If true, any resources such as Variables used in the session will not be + // shared with other sessions. However, when clusterspec propagation is + // enabled, this field is ignored and sessions are always isolated. + bool isolate_session_state = 15; + + // Everything inside Experimental is subject to change and is not subject + // to API stability guarantees in + // https://www.tensorflow.org/guide/version_compat. + message Experimental { + // Task name for group resolution. + string collective_group_leader = 1; + + // We removed the flag client_handles_error_formatting. Marking the tag + // number as reserved. + // TODO(shikharagarwal): Should we just remove this tag so that it can be + // used in future for other purpose? + reserved 2; + + // Which executor to use, the default executor will be used + // if it is an empty string or "DEFAULT" + string executor_type = 3; + + // Guidance to formatting of large RecvBuf fields for transfer. + // Any positive value sets the max chunk size. 0 defaults to 4096. + // Any negative value indicates no max, i.e. one chunk only. + int32 recv_buf_max_chunk = 4; + + // If true, and supported by the platform, the runtime will attempt to + // use NUMA affinity where applicable. One consequence will be the + // existence of as many CPU devices as there are available NUMA nodes. + bool use_numa_affinity = 5; + + // If true, make collective op execution order sequential and deterministic + // for potentially concurrent collective instances. 
+ bool collective_deterministic_sequential_execution = 6; + + // If true, use NCCL for CollectiveOps. This feature is highly + // experimental. + bool collective_nccl = 7; + + // In the following, session state means the value of a variable, elements + // in a hash table, or any other resource, accessible by worker sessions + // held by a TF server. + // + // When ClusterSpec propagation is enabled, the value of + // isolate_session_state is ignored when deciding whether to share session + // states in a TF server (for backwards compatibility reasons). + // - If share_session_state_in_clusterspec_propagation is true, the session + // states are shared. + // - If share_session_state_in_clusterspec_propagation is false, session + // states are isolated. + // + // When clusterspec propagation is not used, the value of + // share_session_state_in_clusterspec_propagation is ignored when deciding + // whether to share session states in a TF server. + // - If isolate_session_state is true, session states are isolated. + // - If isolate_session_state is false, session states are shared. + // + // TODO(b/129330037): Add a single API that consistently treats + // isolate_session_state and ClusterSpec propagation. + bool share_session_state_in_clusterspec_propagation = 8; + + // If using a direct session, disable spinning while waiting for work in + // the thread pool. This may result in higher latency for completing ops, + // but in the case where there is a lot of spinning may result in lower + // CPU usage. + bool disable_thread_spinning = 9; + + // When true, WorkerSessions are created with device attributes from the + // full cluster. + // This is helpful when a worker wants to partition a graph + // (for example during a PartitionedCallOp). + bool share_cluster_devices_in_session = 10; + + // Metadata about the session. + // + // If set, this can be used by the runtime and the Ops for debugging, + // monitoring, etc. 
+ // + // NOTE: This is currently used and propagated only by the direct session. + SessionMetadata session_metadata = 11; + + // If true, the session may treat the graph as being static for optimization + // purposes. + // + // If this option is set to true when a session is created, the full + // GraphDef must be passed in a single call to Session::Create(), and + // Session::Extend() may not be supported. + bool optimize_for_static_graph = 12; + }; + + Experimental experimental = 16; + + // Next: 17 +} + +// Options for a single Run() call. +message RunOptions { + // TODO(pbar) Turn this into a TraceOptions proto which allows + // tracing to be controlled in a more orthogonal manner? + enum TraceLevel { + NO_TRACE = 0; + SOFTWARE_TRACE = 1; + HARDWARE_TRACE = 2; + FULL_TRACE = 3; + } + TraceLevel trace_level = 1; + + // Time to wait for operation to complete in milliseconds. + int64 timeout_in_ms = 2; + + // The thread pool to use, if session_inter_op_thread_pool is configured. + // To use the caller thread set this to -1 - this uses the caller thread + // to execute Session::Run() and thus avoids a context switch. Using the + // caller thread to execute Session::Run() should be done ONLY for simple + // graphs, where the overhead of an additional context switch is + // comparable with the overhead of Session::Run(). + int32 inter_op_thread_pool = 3; + + // Whether the partition graph(s) executed by the executor(s) should be + // outputted via RunMetadata. + bool output_partition_graphs = 5; + + // EXPERIMENTAL. Options used to initialize DebuggerState, if enabled. + DebugOptions debug_options = 6; + + // When enabled, causes tensor allocation information to be included in + // the error message when the Run() call fails because the allocator ran + // out of memory (OOM). + // + // Enabling this option can slow down the Run() call. 
+ bool report_tensor_allocations_upon_oom = 7; + + // Everything inside Experimental is subject to change and is not subject + // to API stability guarantees in + // https://www.tensorflow.org/guide/version_compat. + message Experimental { + // If non-zero, declares that this graph is going to use collective + // ops and must synchronize step_ids with any other graph with this + // same group_key value (in a distributed computation where tasks + // run disjoint graphs). + int64 collective_graph_key = 1; + // If true, then operations (using the inter-op pool) across all + // session::run() calls will be centrally scheduled, optimizing for (median + // and tail) latency. + // Consider using this option for CPU-bound workloads like inference. + bool use_run_handler_pool = 2; + }; + + Experimental experimental = 8; + + reserved 4; +} + +// Metadata output (i.e., non-Tensor) for a single Run() call. +message RunMetadata { + // Statistics traced for this step. Populated if tracing is turned on via the + // "RunOptions" proto. + // EXPERIMENTAL: The format and set of events may change in future versions. + StepStats step_stats = 1; + + // The cost graph for the computation defined by the run call. + CostGraphDef cost_graph = 2; + + // Graphs of the partitions executed by executors. + repeated GraphDef partition_graphs = 3; + + message FunctionGraphs { + // TODO(nareshmodi): Include some sort of function/cache-key identifier? + repeated GraphDef partition_graphs = 1; + + GraphDef pre_optimization_graph = 2; + GraphDef post_optimization_graph = 3; + } + // This is only populated for graphs that are run as functions in TensorFlow + // V2. There will be an entry below for each function that is traced. + // The main use cases of the post_optimization_graph and the partition_graphs + // is to give the caller insight into the graphs that were actually run by the + // runtime. Additional information (such as those in step_stats) will match + // these graphs. 
+ // We also include the pre_optimization_graph since it is usually easier to + // read, and is helpful in situations where the caller wants to get a high + // level idea of what the built graph looks like (since the various graph + // optimization passes might change the structure of the graph significantly). + repeated FunctionGraphs function_graphs = 4; +} + +// Defines a connection between two tensors in a `GraphDef`. +message TensorConnection { + // A tensor name. The value of this tensor will be substituted for + // the tensor named in `to_tensor`. + string from_tensor = 1; + + // A tensor name. The value of this tensor will be bound to the + // value of the tensor named in `from_tensor`. + string to_tensor = 2; +} + +// Defines a subgraph in another `GraphDef` as a set of feed points and nodes +// to be fetched or executed. +// +// Compare with the arguments to `Session::Run()`. +message CallableOptions { + // Tensors to be fed in the callable. Each feed is the name of a tensor. + repeated string feed = 1; + + // Fetches. A list of tensor names. The caller of the callable expects a + // tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The + // order of specified fetches does not change the execution order. + repeated string fetch = 2; + + // Target Nodes. A list of node names. The named nodes will be run by the + // callable but their outputs will not be returned. + repeated string target = 3; + + // Options that will be applied to each run. + RunOptions run_options = 4; + + // Tensors to be connected in the callable. Each TensorConnection denotes + // a pair of tensors in the graph, between which an edge will be created + // in the callable. + repeated TensorConnection tensor_connection = 5; + + // The Tensor objects fed in the callable and fetched from the callable + // are expected to be backed by host (CPU) memory by default. 
+ // + // The options below allow changing that - feeding tensors backed by + // device memory, or returning tensors that are backed by device memory. + // + // The maps below map the name of a feed/fetch tensor (which appears in + // 'feed' or 'fetch' fields above), to the fully qualified name of the device + // owning the memory backing the contents of the tensor. + // + // For example, creating a callable with the following options: + // + // CallableOptions { + // feed: "a:0" + // feed: "b:0" + // + // fetch: "x:0" + // fetch: "y:0" + // + // feed_devices: { + // "a:0": "/job:localhost/replica:0/task:0/device:GPU:0" + // } + // + // fetch_devices: { + // "y:0": "/job:localhost/replica:0/task:0/device:GPU:0" + // } + // } + // + // means that the Callable expects: + // - The first argument ("a:0") is a Tensor backed by GPU memory. + // - The second argument ("b:0") is a Tensor backed by host memory. + // and of its return values: + // - The first output ("x:0") will be backed by host memory. + // - The second output ("y:0") will be backed by GPU memory. + // + // FEEDS: + // It is the responsibility of the caller to ensure that the memory of the fed + // tensors will be correctly initialized and synchronized before it is + // accessed by operations executed during the call to Session::RunCallable(). + // + // This is typically ensured by using the TensorFlow memory allocators + // (Device::GetAllocator()) to create the Tensor to be fed. + // + // Alternatively, for CUDA-enabled GPU devices, this typically means that the + // operation that produced the contents of the tensor has completed, i.e., the + // CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or + // cuStreamSynchronize()). + map feed_devices = 6; + map fetch_devices = 7; + + // By default, RunCallable() will synchronize the GPU stream before returning + // fetched tensors on a GPU device, to ensure that the values in those tensors + // have been produced. 
This simplifies interacting with the tensors, but + // potentially incurs a performance hit. + // + // If this options is set to true, the caller is responsible for ensuring + // that the values in the fetched tensors have been produced before they are + // used. The caller can do this by invoking `Device::Sync()` on the underlying + // device(s), or by feeding the tensors back to the same Session using + // `feed_devices` with the same corresponding device name. + bool fetch_skip_sync = 8; + + // Next: 9 +} diff --git a/executor/proto/tensorflow/core/protobuf/control_flow.pb.go b/executor/proto/tensorflow/core/protobuf/control_flow.pb.go new file mode 100644 index 0000000000..107f1d9cf4 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/control_flow.pb.go @@ -0,0 +1,434 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/control_flow.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Protocol buffer representing the values in ControlFlowContext. +type ValuesDef struct { + // Value names that have been seen in this context. + Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + // Value names referenced by but external to this context. 
+ ExternalValues map[string]string `protobuf:"bytes,2,rep,name=external_values,json=externalValues,proto3" json:"external_values,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValuesDef) Reset() { *m = ValuesDef{} } +func (m *ValuesDef) String() string { return proto.CompactTextString(m) } +func (*ValuesDef) ProtoMessage() {} +func (*ValuesDef) Descriptor() ([]byte, []int) { + return fileDescriptor_64affc5a646d7df1, []int{0} +} + +func (m *ValuesDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValuesDef.Unmarshal(m, b) +} +func (m *ValuesDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValuesDef.Marshal(b, m, deterministic) +} +func (m *ValuesDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValuesDef.Merge(m, src) +} +func (m *ValuesDef) XXX_Size() int { + return xxx_messageInfo_ValuesDef.Size(m) +} +func (m *ValuesDef) XXX_DiscardUnknown() { + xxx_messageInfo_ValuesDef.DiscardUnknown(m) +} + +var xxx_messageInfo_ValuesDef proto.InternalMessageInfo + +func (m *ValuesDef) GetValues() []string { + if m != nil { + return m.Values + } + return nil +} + +func (m *ValuesDef) GetExternalValues() map[string]string { + if m != nil { + return m.ExternalValues + } + return nil +} + +// Container for any kind of control flow context. Any other control flow +// contexts that are added below should also be added here. 
+type ControlFlowContextDef struct { + // Types that are valid to be assigned to Ctxt: + // *ControlFlowContextDef_CondCtxt + // *ControlFlowContextDef_WhileCtxt + Ctxt isControlFlowContextDef_Ctxt `protobuf_oneof:"ctxt"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControlFlowContextDef) Reset() { *m = ControlFlowContextDef{} } +func (m *ControlFlowContextDef) String() string { return proto.CompactTextString(m) } +func (*ControlFlowContextDef) ProtoMessage() {} +func (*ControlFlowContextDef) Descriptor() ([]byte, []int) { + return fileDescriptor_64affc5a646d7df1, []int{1} +} + +func (m *ControlFlowContextDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControlFlowContextDef.Unmarshal(m, b) +} +func (m *ControlFlowContextDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControlFlowContextDef.Marshal(b, m, deterministic) +} +func (m *ControlFlowContextDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControlFlowContextDef.Merge(m, src) +} +func (m *ControlFlowContextDef) XXX_Size() int { + return xxx_messageInfo_ControlFlowContextDef.Size(m) +} +func (m *ControlFlowContextDef) XXX_DiscardUnknown() { + xxx_messageInfo_ControlFlowContextDef.DiscardUnknown(m) +} + +var xxx_messageInfo_ControlFlowContextDef proto.InternalMessageInfo + +type isControlFlowContextDef_Ctxt interface { + isControlFlowContextDef_Ctxt() +} + +type ControlFlowContextDef_CondCtxt struct { + CondCtxt *CondContextDef `protobuf:"bytes,1,opt,name=cond_ctxt,json=condCtxt,proto3,oneof"` +} + +type ControlFlowContextDef_WhileCtxt struct { + WhileCtxt *WhileContextDef `protobuf:"bytes,2,opt,name=while_ctxt,json=whileCtxt,proto3,oneof"` +} + +func (*ControlFlowContextDef_CondCtxt) isControlFlowContextDef_Ctxt() {} + +func (*ControlFlowContextDef_WhileCtxt) isControlFlowContextDef_Ctxt() {} + +func (m *ControlFlowContextDef) GetCtxt() isControlFlowContextDef_Ctxt { + 
if m != nil { + return m.Ctxt + } + return nil +} + +func (m *ControlFlowContextDef) GetCondCtxt() *CondContextDef { + if x, ok := m.GetCtxt().(*ControlFlowContextDef_CondCtxt); ok { + return x.CondCtxt + } + return nil +} + +func (m *ControlFlowContextDef) GetWhileCtxt() *WhileContextDef { + if x, ok := m.GetCtxt().(*ControlFlowContextDef_WhileCtxt); ok { + return x.WhileCtxt + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ControlFlowContextDef) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ControlFlowContextDef_CondCtxt)(nil), + (*ControlFlowContextDef_WhileCtxt)(nil), + } +} + +// Protocol buffer representing a CondContext object. +type CondContextDef struct { + // Name of the context. + ContextName string `protobuf:"bytes,1,opt,name=context_name,json=contextName,proto3" json:"context_name,omitempty"` + // Name of the pred tensor. + PredName string `protobuf:"bytes,2,opt,name=pred_name,json=predName,proto3" json:"pred_name,omitempty"` + // Name of the pivot tensor. + PivotName string `protobuf:"bytes,3,opt,name=pivot_name,json=pivotName,proto3" json:"pivot_name,omitempty"` + // Branch prediction. 0 or 1. + Branch int32 `protobuf:"varint,4,opt,name=branch,proto3" json:"branch,omitempty"` + // Values and external values in control flow context. + ValuesDef *ValuesDef `protobuf:"bytes,5,opt,name=values_def,json=valuesDef,proto3" json:"values_def,omitempty"` + // Contexts contained inside this context (e.g. nested conds). 
+ NestedContexts []*ControlFlowContextDef `protobuf:"bytes,6,rep,name=nested_contexts,json=nestedContexts,proto3" json:"nested_contexts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CondContextDef) Reset() { *m = CondContextDef{} } +func (m *CondContextDef) String() string { return proto.CompactTextString(m) } +func (*CondContextDef) ProtoMessage() {} +func (*CondContextDef) Descriptor() ([]byte, []int) { + return fileDescriptor_64affc5a646d7df1, []int{2} +} + +func (m *CondContextDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CondContextDef.Unmarshal(m, b) +} +func (m *CondContextDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CondContextDef.Marshal(b, m, deterministic) +} +func (m *CondContextDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_CondContextDef.Merge(m, src) +} +func (m *CondContextDef) XXX_Size() int { + return xxx_messageInfo_CondContextDef.Size(m) +} +func (m *CondContextDef) XXX_DiscardUnknown() { + xxx_messageInfo_CondContextDef.DiscardUnknown(m) +} + +var xxx_messageInfo_CondContextDef proto.InternalMessageInfo + +func (m *CondContextDef) GetContextName() string { + if m != nil { + return m.ContextName + } + return "" +} + +func (m *CondContextDef) GetPredName() string { + if m != nil { + return m.PredName + } + return "" +} + +func (m *CondContextDef) GetPivotName() string { + if m != nil { + return m.PivotName + } + return "" +} + +func (m *CondContextDef) GetBranch() int32 { + if m != nil { + return m.Branch + } + return 0 +} + +func (m *CondContextDef) GetValuesDef() *ValuesDef { + if m != nil { + return m.ValuesDef + } + return nil +} + +func (m *CondContextDef) GetNestedContexts() []*ControlFlowContextDef { + if m != nil { + return m.NestedContexts + } + return nil +} + +// Protocol buffer representing a WhileContext object. +type WhileContextDef struct { + // Name of the context. 
+ ContextName string `protobuf:"bytes,1,opt,name=context_name,json=contextName,proto3" json:"context_name,omitempty"` + // The number of iterations allowed to run in parallel. + ParallelIterations int32 `protobuf:"varint,2,opt,name=parallel_iterations,json=parallelIterations,proto3" json:"parallel_iterations,omitempty"` + // Whether backprop is enabled for this while loop. + BackProp bool `protobuf:"varint,3,opt,name=back_prop,json=backProp,proto3" json:"back_prop,omitempty"` + // Whether GPU-CPU memory swap is enabled for this loop. + SwapMemory bool `protobuf:"varint,4,opt,name=swap_memory,json=swapMemory,proto3" json:"swap_memory,omitempty"` + // Name of the pivot tensor. + PivotName string `protobuf:"bytes,5,opt,name=pivot_name,json=pivotName,proto3" json:"pivot_name,omitempty"` + // Name of the pivot_for_pred tensor. + PivotForPredName string `protobuf:"bytes,6,opt,name=pivot_for_pred_name,json=pivotForPredName,proto3" json:"pivot_for_pred_name,omitempty"` + // Name of the pivot_for_body tensor. + PivotForBodyName string `protobuf:"bytes,7,opt,name=pivot_for_body_name,json=pivotForBodyName,proto3" json:"pivot_for_body_name,omitempty"` + // List of names for exit tensors. + LoopExitNames []string `protobuf:"bytes,8,rep,name=loop_exit_names,json=loopExitNames,proto3" json:"loop_exit_names,omitempty"` + // List of names for enter tensors. + LoopEnterNames []string `protobuf:"bytes,10,rep,name=loop_enter_names,json=loopEnterNames,proto3" json:"loop_enter_names,omitempty"` + // Values and external values in control flow context. + ValuesDef *ValuesDef `protobuf:"bytes,9,opt,name=values_def,json=valuesDef,proto3" json:"values_def,omitempty"` + // Optional name of the maximum_iterations tensor. + MaximumIterationsName string `protobuf:"bytes,11,opt,name=maximum_iterations_name,json=maximumIterationsName,proto3" json:"maximum_iterations_name,omitempty"` + // Contexts contained inside this context (e.g. nested whiles). 
+ NestedContexts []*ControlFlowContextDef `protobuf:"bytes,12,rep,name=nested_contexts,json=nestedContexts,proto3" json:"nested_contexts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WhileContextDef) Reset() { *m = WhileContextDef{} } +func (m *WhileContextDef) String() string { return proto.CompactTextString(m) } +func (*WhileContextDef) ProtoMessage() {} +func (*WhileContextDef) Descriptor() ([]byte, []int) { + return fileDescriptor_64affc5a646d7df1, []int{3} +} + +func (m *WhileContextDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WhileContextDef.Unmarshal(m, b) +} +func (m *WhileContextDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WhileContextDef.Marshal(b, m, deterministic) +} +func (m *WhileContextDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_WhileContextDef.Merge(m, src) +} +func (m *WhileContextDef) XXX_Size() int { + return xxx_messageInfo_WhileContextDef.Size(m) +} +func (m *WhileContextDef) XXX_DiscardUnknown() { + xxx_messageInfo_WhileContextDef.DiscardUnknown(m) +} + +var xxx_messageInfo_WhileContextDef proto.InternalMessageInfo + +func (m *WhileContextDef) GetContextName() string { + if m != nil { + return m.ContextName + } + return "" +} + +func (m *WhileContextDef) GetParallelIterations() int32 { + if m != nil { + return m.ParallelIterations + } + return 0 +} + +func (m *WhileContextDef) GetBackProp() bool { + if m != nil { + return m.BackProp + } + return false +} + +func (m *WhileContextDef) GetSwapMemory() bool { + if m != nil { + return m.SwapMemory + } + return false +} + +func (m *WhileContextDef) GetPivotName() string { + if m != nil { + return m.PivotName + } + return "" +} + +func (m *WhileContextDef) GetPivotForPredName() string { + if m != nil { + return m.PivotForPredName + } + return "" +} + +func (m *WhileContextDef) GetPivotForBodyName() string { + if m != nil { + return 
m.PivotForBodyName + } + return "" +} + +func (m *WhileContextDef) GetLoopExitNames() []string { + if m != nil { + return m.LoopExitNames + } + return nil +} + +func (m *WhileContextDef) GetLoopEnterNames() []string { + if m != nil { + return m.LoopEnterNames + } + return nil +} + +func (m *WhileContextDef) GetValuesDef() *ValuesDef { + if m != nil { + return m.ValuesDef + } + return nil +} + +func (m *WhileContextDef) GetMaximumIterationsName() string { + if m != nil { + return m.MaximumIterationsName + } + return "" +} + +func (m *WhileContextDef) GetNestedContexts() []*ControlFlowContextDef { + if m != nil { + return m.NestedContexts + } + return nil +} + +func init() { + proto.RegisterType((*ValuesDef)(nil), "tensorflow.ValuesDef") + proto.RegisterMapType((map[string]string)(nil), "tensorflow.ValuesDef.ExternalValuesEntry") + proto.RegisterType((*ControlFlowContextDef)(nil), "tensorflow.ControlFlowContextDef") + proto.RegisterType((*CondContextDef)(nil), "tensorflow.CondContextDef") + proto.RegisterType((*WhileContextDef)(nil), "tensorflow.WhileContextDef") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/control_flow.proto", fileDescriptor_64affc5a646d7df1) +} + +var fileDescriptor_64affc5a646d7df1 = []byte{ + // 623 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0x49, 0x13, 0xe2, 0x49, 0x49, 0xca, 0x96, 0x16, 0xab, 0x15, 0x22, 0xcd, 0x01, 0x05, + 0x21, 0x12, 0xa9, 0x20, 0x04, 0xa8, 0x17, 0x52, 0x5a, 0x01, 0x12, 0x28, 0xf2, 0x01, 0x24, 0x2e, + 0xd6, 0xc6, 0xde, 0xb4, 0x56, 0x6d, 0x8f, 0xb5, 0xde, 0x34, 0xce, 0x23, 0xf0, 0x06, 0x3c, 0x04, + 0x4f, 0xc0, 0x93, 0x71, 0x44, 0x3b, 0xeb, 0x26, 0x71, 0xc8, 0xa1, 0xe2, 0xb6, 0xf3, 0xfd, 0xac, + 0x67, 0xbf, 0x59, 0x2f, 0x3c, 0x53, 0x22, 0xc9, 0x50, 0x4e, 0x22, 0x9c, 0x0d, 0x7c, 0x94, 0x62, + 0x90, 0x4a, 0x54, 0x38, 0x9e, 0x4e, 0x06, 0x3e, 0x26, 0x4a, 0x62, 0xe4, 0x69, 0xaa, 0x4f, 0x28, + 0x83, 0xa5, 
0xb8, 0xfb, 0xdb, 0x02, 0xfb, 0x2b, 0x8f, 0xa6, 0x22, 0x7b, 0x2f, 0x26, 0x6c, 0x1f, + 0xea, 0xd7, 0x54, 0x38, 0x56, 0xa7, 0xda, 0xb3, 0xdd, 0xa2, 0x62, 0x2e, 0xb4, 0x45, 0xae, 0x84, + 0x4c, 0x78, 0xe4, 0x15, 0x82, 0x4a, 0xa7, 0xda, 0x6b, 0x1e, 0x3f, 0xed, 0x2f, 0xf7, 0xea, 0x2f, + 0xf6, 0xe9, 0x9f, 0x15, 0x62, 0x83, 0x9c, 0x25, 0x4a, 0xce, 0xdd, 0x96, 0x28, 0x81, 0x07, 0xef, + 0x60, 0x77, 0x83, 0x8c, 0xed, 0x40, 0xf5, 0x4a, 0xcc, 0x1d, 0xab, 0x63, 0xf5, 0x6c, 0x57, 0x2f, + 0xd9, 0x03, 0xa8, 0xd1, 0x37, 0x9d, 0x0a, 0x61, 0xa6, 0x78, 0x5b, 0x79, 0x6d, 0x75, 0x7f, 0x5a, + 0xb0, 0x77, 0x6a, 0xce, 0x77, 0x1e, 0xe1, 0x4c, 0x2f, 0x45, 0xae, 0xf4, 0x41, 0xde, 0x80, 0xed, + 0x63, 0x12, 0x78, 0xbe, 0xca, 0x15, 0xed, 0xd5, 0x3c, 0x3e, 0x58, 0x6d, 0xf5, 0x14, 0x93, 0x60, + 0x29, 0xff, 0x70, 0xc7, 0x6d, 0x68, 0xf9, 0xa9, 0xca, 0x15, 0x3b, 0x01, 0x98, 0x5d, 0x86, 0x91, + 0x30, 0xde, 0x0a, 0x79, 0x0f, 0x57, 0xbd, 0xdf, 0x34, 0x5b, 0x32, 0xdb, 0x64, 0xd0, 0xee, 0x61, + 0x1d, 0xb6, 0xb4, 0xaf, 0xfb, 0xa3, 0x02, 0xad, 0xf2, 0x47, 0xd8, 0x11, 0x6c, 0xfb, 0xa6, 0xf2, + 0x12, 0x1e, 0x8b, 0xe2, 0x88, 0xcd, 0x02, 0xfb, 0xc2, 0x63, 0xc1, 0x0e, 0xc1, 0x4e, 0xa5, 0x08, + 0x0c, 0x6f, 0x8e, 0xdb, 0xd0, 0x00, 0x91, 0x8f, 0x00, 0xd2, 0xf0, 0x1a, 0x0b, 0x77, 0x95, 0x58, + 0x9b, 0x10, 0xa2, 0xf7, 0xa1, 0x3e, 0x96, 0x3c, 0xf1, 0x2f, 0x9d, 0xad, 0x8e, 0xd5, 0xab, 0xb9, + 0x45, 0xc5, 0x5e, 0x02, 0x98, 0x91, 0x79, 0x81, 0x98, 0x38, 0x35, 0x3a, 0xcf, 0xde, 0xc6, 0xb1, + 0xb9, 0xf6, 0xf5, 0xe2, 0x26, 0x7c, 0x82, 0x76, 0x22, 0x32, 0x25, 0x02, 0xaf, 0xe8, 0x2f, 0x73, + 0xea, 0x34, 0xf1, 0xa3, 0xb5, 0x18, 0xff, 0x0d, 0xdf, 0x6d, 0x19, 0x67, 0x81, 0x64, 0xdd, 0x5f, + 0x5b, 0xd0, 0x5e, 0x0b, 0xed, 0x36, 0x61, 0x0c, 0x60, 0x37, 0xe5, 0x92, 0x47, 0x91, 0x88, 0xbc, + 0x50, 0x09, 0xc9, 0x55, 0x88, 0x49, 0x46, 0xb1, 0xd4, 0x5c, 0x76, 0x43, 0x7d, 0x5c, 0x30, 0x3a, + 0xbd, 0x31, 0xf7, 0xaf, 0xbc, 0x54, 0x62, 0x4a, 0xf9, 0x34, 0xdc, 0x86, 0x06, 0x46, 0x12, 0x53, + 0xf6, 0x18, 0x9a, 0xd9, 0x8c, 0xa7, 0x5e, 0x2c, 
0x62, 0x94, 0x73, 0xca, 0xa8, 0xe1, 0x82, 0x86, + 0x3e, 0x13, 0xb2, 0x16, 0x6f, 0x6d, 0x3d, 0xde, 0xe7, 0xb0, 0x6b, 0xe8, 0x09, 0x4a, 0x6f, 0x39, + 0xa4, 0x3a, 0xe9, 0x76, 0x88, 0x3a, 0x47, 0x39, 0xba, 0x19, 0x56, 0x49, 0x3e, 0xc6, 0x60, 0x6e, + 0xe4, 0x77, 0xcb, 0xf2, 0x21, 0x06, 0x73, 0x92, 0x3f, 0x81, 0x76, 0x84, 0x98, 0x7a, 0x22, 0x0f, + 0x4d, 0x03, 0x99, 0xd3, 0xa0, 0x3f, 0xf0, 0x9e, 0x86, 0xcf, 0xf2, 0x90, 0x9a, 0xc8, 0x58, 0x0f, + 0x76, 0x8c, 0x2e, 0x51, 0x42, 0x16, 0x42, 0x20, 0x61, 0x8b, 0x84, 0x1a, 0x36, 0xca, 0xf2, 0xd8, + 0xed, 0x5b, 0x8e, 0xfd, 0x15, 0x3c, 0x8c, 0x79, 0x1e, 0xc6, 0xd3, 0x78, 0x25, 0x72, 0xd3, 0x7a, + 0x93, 0x5a, 0xdf, 0x2b, 0xe8, 0x65, 0xec, 0xd4, 0xff, 0x86, 0xeb, 0xb2, 0xfd, 0x9f, 0xd7, 0x65, + 0x98, 0x82, 0x83, 0xf2, 0x62, 0xd5, 0x37, 0x91, 0x3c, 0x16, 0x33, 0x94, 0x57, 0xc3, 0xfb, 0x2b, + 0x5b, 0x8c, 0xf4, 0x63, 0x96, 0x8d, 0xac, 0xef, 0x27, 0x17, 0xa1, 0xba, 0x9c, 0x8e, 0xfb, 0x3e, + 0xc6, 0x83, 0x95, 0x77, 0x70, 0xf3, 0xf2, 0x02, 0xcb, 0x0f, 0xe4, 0x1f, 0xcb, 0x1a, 0xd7, 0xa9, + 0x78, 0xf1, 0x37, 0x00, 0x00, 0xff, 0xff, 0x9d, 0xad, 0xaf, 0x85, 0x46, 0x05, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/control_flow.proto b/executor/proto/tensorflow/core/protobuf/control_flow.proto new file mode 100644 index 0000000000..5f44878c44 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/control_flow.proto @@ -0,0 +1,90 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "ControlFlowProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; + +// Control flow context related protocol buffers. + +// Protocol buffer representing the values in ControlFlowContext. +message ValuesDef { + // Value names that have been seen in this context. + repeated string values = 1; + + // Value names referenced by but external to this context. 
+ map external_values = 2; +} + +// Container for any kind of control flow context. Any other control flow +// contexts that are added below should also be added here. +message ControlFlowContextDef { + oneof ctxt { + CondContextDef cond_ctxt = 1; + WhileContextDef while_ctxt = 2; + } +} + +// Protocol buffer representing a CondContext object. +message CondContextDef { + // Name of the context. + string context_name = 1; + + // Name of the pred tensor. + string pred_name = 2; + + // Name of the pivot tensor. + string pivot_name = 3; + + // Branch prediction. 0 or 1. + int32 branch = 4; + + // Values and external values in control flow context. + ValuesDef values_def = 5; + + // Contexts contained inside this context (e.g. nested conds). + repeated ControlFlowContextDef nested_contexts = 6; +} + +// Protocol buffer representing a WhileContext object. +message WhileContextDef { + // Name of the context. + string context_name = 1; + + // The number of iterations allowed to run in parallel. + int32 parallel_iterations = 2; + + // Whether backprop is enabled for this while loop. + bool back_prop = 3; + + // Whether GPU-CPU memory swap is enabled for this loop. + bool swap_memory = 4; + + // Name of the pivot tensor. + string pivot_name = 5; + + // Name of the pivot_for_pred tensor. + string pivot_for_pred_name = 6; + + // Name of the pivot_for_body tensor. + string pivot_for_body_name = 7; + + // List of names for exit tensors. + repeated string loop_exit_names = 8; + + // List of names for enter tensors. + repeated string loop_enter_names = 10; + + // Values and external values in control flow context. + ValuesDef values_def = 9; + + // Optional name of the maximum_iterations tensor. + string maximum_iterations_name = 11; + + // Contexts contained inside this context (e.g. nested whiles). + repeated ControlFlowContextDef nested_contexts = 12; + + // Next available id: 13. 
+} diff --git a/executor/proto/tensorflow/core/protobuf/conv_autotuning.pb.go b/executor/proto/tensorflow/core/protobuf/conv_autotuning.pb.go new file mode 100644 index 0000000000..16900d20cc --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/conv_autotuning.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/conv_autotuning.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A convolution. Currently it's only used for logging. In the future, we may +// want to use it in the API as well. +type ConvolutionProto struct { + Kind ConvolutionKind `protobuf:"varint,1,opt,name=kind,proto3,enum=stream_executor.dnn.ConvolutionKind" json:"kind,omitempty"` + Input *TensorDescriptorProto `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` + Filter *TensorDescriptorProto `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` + Output *TensorDescriptorProto `protobuf:"bytes,4,opt,name=output,proto3" json:"output,omitempty"` + ConvDesc *ConvolutionDescriptorProto `protobuf:"bytes,5,opt,name=conv_desc,json=convDesc,proto3" json:"conv_desc,omitempty"` + // result = conv_scale * conv(...) + side_value_scale * side_value. + // side_value is an arbitrary buffer if activation is not none. Otherwise, it + // has to be the result buffer (using its old values). 
+ ConvScale float64 `protobuf:"fixed64,6,opt,name=conv_scale,json=convScale,proto3" json:"conv_scale,omitempty"` + SideValueScale float64 `protobuf:"fixed64,7,opt,name=side_value_scale,json=sideValueScale,proto3" json:"side_value_scale,omitempty"` + Activation ActivationMode `protobuf:"varint,8,opt,name=activation,proto3,enum=stream_executor.dnn.ActivationMode" json:"activation,omitempty"` + InputAddress int64 `protobuf:"varint,9,opt,name=input_address,json=inputAddress,proto3" json:"input_address,omitempty"` + FilterAddress int64 `protobuf:"varint,10,opt,name=filter_address,json=filterAddress,proto3" json:"filter_address,omitempty"` + OutputAddress int64 `protobuf:"varint,11,opt,name=output_address,json=outputAddress,proto3" json:"output_address,omitempty"` + BiasAddress int64 `protobuf:"varint,12,opt,name=bias_address,json=biasAddress,proto3" json:"bias_address,omitempty"` + SideInputAddress int64 `protobuf:"varint,13,opt,name=side_input_address,json=sideInputAddress,proto3" json:"side_input_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConvolutionProto) Reset() { *m = ConvolutionProto{} } +func (m *ConvolutionProto) String() string { return proto.CompactTextString(m) } +func (*ConvolutionProto) ProtoMessage() {} +func (*ConvolutionProto) Descriptor() ([]byte, []int) { + return fileDescriptor_5d1b55de4e1b5595, []int{0} +} + +func (m *ConvolutionProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConvolutionProto.Unmarshal(m, b) +} +func (m *ConvolutionProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConvolutionProto.Marshal(b, m, deterministic) +} +func (m *ConvolutionProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConvolutionProto.Merge(m, src) +} +func (m *ConvolutionProto) XXX_Size() int { + return xxx_messageInfo_ConvolutionProto.Size(m) +} +func (m *ConvolutionProto) XXX_DiscardUnknown() { + 
xxx_messageInfo_ConvolutionProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ConvolutionProto proto.InternalMessageInfo + +func (m *ConvolutionProto) GetKind() ConvolutionKind { + if m != nil { + return m.Kind + } + return ConvolutionKind_INVALID +} + +func (m *ConvolutionProto) GetInput() *TensorDescriptorProto { + if m != nil { + return m.Input + } + return nil +} + +func (m *ConvolutionProto) GetFilter() *TensorDescriptorProto { + if m != nil { + return m.Filter + } + return nil +} + +func (m *ConvolutionProto) GetOutput() *TensorDescriptorProto { + if m != nil { + return m.Output + } + return nil +} + +func (m *ConvolutionProto) GetConvDesc() *ConvolutionDescriptorProto { + if m != nil { + return m.ConvDesc + } + return nil +} + +func (m *ConvolutionProto) GetConvScale() float64 { + if m != nil { + return m.ConvScale + } + return 0 +} + +func (m *ConvolutionProto) GetSideValueScale() float64 { + if m != nil { + return m.SideValueScale + } + return 0 +} + +func (m *ConvolutionProto) GetActivation() ActivationMode { + if m != nil { + return m.Activation + } + return ActivationMode_kNone +} + +func (m *ConvolutionProto) GetInputAddress() int64 { + if m != nil { + return m.InputAddress + } + return 0 +} + +func (m *ConvolutionProto) GetFilterAddress() int64 { + if m != nil { + return m.FilterAddress + } + return 0 +} + +func (m *ConvolutionProto) GetOutputAddress() int64 { + if m != nil { + return m.OutputAddress + } + return 0 +} + +func (m *ConvolutionProto) GetBiasAddress() int64 { + if m != nil { + return m.BiasAddress + } + return 0 +} + +func (m *ConvolutionProto) GetSideInputAddress() int64 { + if m != nil { + return m.SideInputAddress + } + return 0 +} + +func init() { + proto.RegisterType((*ConvolutionProto)(nil), "tensorflow.ConvolutionProto") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/conv_autotuning.proto", fileDescriptor_5d1b55de4e1b5595) +} + +var fileDescriptor_5d1b55de4e1b5595 = []byte{ + // 378 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x5b, 0x8b, 0xd3, 0x40, + 0x18, 0x86, 0x89, 0x3d, 0xd8, 0x7e, 0x3d, 0x50, 0xe6, 0x2a, 0x08, 0x42, 0xb4, 0x15, 0x82, 0x48, + 0x02, 0xf5, 0xc6, 0x4b, 0x6b, 0xbd, 0x11, 0x15, 0x24, 0x8a, 0xb7, 0x61, 0x9a, 0x99, 0xca, 0x60, + 0x76, 0xa6, 0xcc, 0x21, 0xbb, 0xbf, 0x6c, 0x7f, 0xdf, 0x32, 0xdf, 0xb4, 0xd9, 0x6c, 0x29, 0x2c, + 0xbd, 0x7d, 0xfb, 0x3c, 0xef, 0x7c, 0xbc, 0x0d, 0x64, 0x96, 0x4b, 0xa3, 0xf4, 0xbe, 0x56, 0xb7, + 0x79, 0xa5, 0x34, 0xcf, 0x0f, 0x5a, 0x59, 0xb5, 0x73, 0xfb, 0xbc, 0x52, 0xb2, 0x29, 0xa9, 0xb3, + 0xca, 0x3a, 0x29, 0xe4, 0xbf, 0x0c, 0x7f, 0x20, 0xf0, 0xc8, 0xbf, 0x5a, 0x75, 0x5c, 0x63, 0x35, + 0xa7, 0x37, 0x25, 0xbf, 0xe3, 0x95, 0xb3, 0x4a, 0xe7, 0x4c, 0xca, 0x60, 0xbc, 0xbd, 0x1f, 0xc0, + 0x62, 0xab, 0x64, 0xa3, 0x6a, 0x67, 0x85, 0x92, 0xbf, 0xb0, 0xe6, 0x13, 0xf4, 0xff, 0x0b, 0xc9, + 0xe2, 0x28, 0x89, 0xd2, 0xf9, 0x7a, 0x95, 0x9d, 0xe9, 0x99, 0xd7, 0x3b, 0xd2, 0x77, 0x21, 0x59, + 0x81, 0x06, 0xf9, 0x0c, 0x03, 0x21, 0x0f, 0xce, 0xc6, 0x2f, 0x92, 0x28, 0x9d, 0xac, 0xdf, 0x5f, + 0x54, 0xff, 0xe0, 0x61, 0x5f, 0xb9, 0xa9, 0xb4, 0x38, 0x58, 0xa5, 0xf1, 0xd1, 0x22, 0x88, 0xe4, + 0x0b, 0x0c, 0xf7, 0xa2, 0xb6, 0x5c, 0xc7, 0xbd, 0xab, 0x2b, 0x8e, 0xa6, 0xef, 0x50, 0xce, 0xfa, + 0x33, 0xfa, 0xd7, 0x77, 0x04, 0x93, 0xfc, 0x80, 0x31, 0x6e, 0xcc, 0xb8, 0xa9, 0xe2, 0x01, 0xd6, + 0xe4, 0xcf, 0x0d, 0x71, 0xde, 0x35, 0xf2, 0x0d, 0x3e, 0x24, 0xaf, 0x01, 0xb0, 0xcd, 0x54, 0xb4, + 0xe6, 0xf1, 0x30, 0x89, 0xd2, 0xa8, 0xc0, 0xfe, 0xdf, 0x3e, 0x20, 0x29, 0x2c, 0x8c, 0x60, 0xbc, + 0x6c, 0x68, 0xed, 0xf8, 0x11, 0x7a, 0x89, 0xd0, 0xdc, 0xe7, 0x7f, 0x7d, 0x1c, 0xc8, 0x2d, 0x00, + 0xad, 0xac, 0x68, 0xa8, 0x7f, 0x2f, 0x1e, 0xe1, 0x1f, 0xb4, 0xbc, 0x78, 0xd7, 0xa6, 0xc5, 0x7e, + 0x2a, 0xc6, 0x8b, 0x8e, 0x46, 0x96, 0x30, 0xc3, 0xb1, 0x4b, 0xca, 0x98, 0xe6, 0xc6, 0xc4, 0xe3, + 0x24, 0x4a, 0x7b, 0xc5, 0x14, 0xc3, 0x4d, 0xc8, 0xc8, 0x3b, 0x98, 0x87, 0x39, 0x5b, 0x0a, 0x90, + 0x9a, 0x85, 0xb4, 
0x83, 0x85, 0xc5, 0x5a, 0x6c, 0x12, 0xb0, 0x90, 0x9e, 0xb0, 0x37, 0x30, 0xdd, + 0x09, 0x6a, 0x5a, 0x68, 0x8a, 0xd0, 0xc4, 0x67, 0x27, 0xe4, 0x03, 0x10, 0x1c, 0xe1, 0xe9, 0x69, + 0x33, 0x04, 0x71, 0x9e, 0x6f, 0x9d, 0xf3, 0x76, 0x43, 0xfc, 0x7e, 0x3f, 0x3e, 0x04, 0x00, 0x00, + 0xff, 0xff, 0x60, 0x18, 0x9d, 0x78, 0x23, 0x03, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/conv_autotuning.proto b/executor/proto/tensorflow/core/protobuf/conv_autotuning.proto new file mode 100644 index 0000000000..5616299a6d --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/conv_autotuning.proto @@ -0,0 +1,31 @@ +// This is used for convolution logging. Also see +// tensorflow/core/protobuf/autotuing.h +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/stream_executor/dnn.proto"; + +// A convolution. Currently it's only used for logging. In the future, we may +// want to use it in the API as well. +message ConvolutionProto { + stream_executor.dnn.ConvolutionKind kind = 1; + stream_executor.dnn.TensorDescriptorProto input = 2; + stream_executor.dnn.TensorDescriptorProto filter = 3; + stream_executor.dnn.TensorDescriptorProto output = 4; + stream_executor.dnn.ConvolutionDescriptorProto conv_desc = 5; + + // result = conv_scale * conv(...) + side_value_scale * side_value. + // side_value is an arbitrary buffer if activation is not none. Otherwise, it + // has to be the result buffer (using its old values). 
+ double conv_scale = 6; + double side_value_scale = 7; + + stream_executor.dnn.ActivationMode activation = 8; + + int64 input_address = 9; + int64 filter_address = 10; + int64 output_address = 11; + int64 bias_address = 12; + int64 side_input_address = 13; +} diff --git a/executor/proto/tensorflow/core/protobuf/critical_section.pb.go b/executor/proto/tensorflow/core/protobuf/critical_section.pb.go new file mode 100644 index 0000000000..a8d1600f87 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/critical_section.pb.go @@ -0,0 +1,142 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/critical_section.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Protocol buffer representing a CriticalSection. +type CriticalSectionDef struct { + // Name of the critical section handle. 
+ CriticalSectionName string `protobuf:"bytes,1,opt,name=critical_section_name,json=criticalSectionName,proto3" json:"critical_section_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CriticalSectionDef) Reset() { *m = CriticalSectionDef{} } +func (m *CriticalSectionDef) String() string { return proto.CompactTextString(m) } +func (*CriticalSectionDef) ProtoMessage() {} +func (*CriticalSectionDef) Descriptor() ([]byte, []int) { + return fileDescriptor_d30d8be90fd098b9, []int{0} +} + +func (m *CriticalSectionDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CriticalSectionDef.Unmarshal(m, b) +} +func (m *CriticalSectionDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CriticalSectionDef.Marshal(b, m, deterministic) +} +func (m *CriticalSectionDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_CriticalSectionDef.Merge(m, src) +} +func (m *CriticalSectionDef) XXX_Size() int { + return xxx_messageInfo_CriticalSectionDef.Size(m) +} +func (m *CriticalSectionDef) XXX_DiscardUnknown() { + xxx_messageInfo_CriticalSectionDef.DiscardUnknown(m) +} + +var xxx_messageInfo_CriticalSectionDef proto.InternalMessageInfo + +func (m *CriticalSectionDef) GetCriticalSectionName() string { + if m != nil { + return m.CriticalSectionName + } + return "" +} + +// Protocol buffer representing a CriticalSection execution. +type CriticalSectionExecutionDef struct { + // Name of the critical section handle. + ExecuteInCriticalSectionName string `protobuf:"bytes,1,opt,name=execute_in_critical_section_name,json=executeInCriticalSectionName,proto3" json:"execute_in_critical_section_name,omitempty"` + // Whether this operation requires exclusive access to its resources, + // (i.e., no other CriticalSections may request the same resources). 
+ ExclusiveResourceAccess bool `protobuf:"varint,2,opt,name=exclusive_resource_access,json=exclusiveResourceAccess,proto3" json:"exclusive_resource_access,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CriticalSectionExecutionDef) Reset() { *m = CriticalSectionExecutionDef{} } +func (m *CriticalSectionExecutionDef) String() string { return proto.CompactTextString(m) } +func (*CriticalSectionExecutionDef) ProtoMessage() {} +func (*CriticalSectionExecutionDef) Descriptor() ([]byte, []int) { + return fileDescriptor_d30d8be90fd098b9, []int{1} +} + +func (m *CriticalSectionExecutionDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CriticalSectionExecutionDef.Unmarshal(m, b) +} +func (m *CriticalSectionExecutionDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CriticalSectionExecutionDef.Marshal(b, m, deterministic) +} +func (m *CriticalSectionExecutionDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_CriticalSectionExecutionDef.Merge(m, src) +} +func (m *CriticalSectionExecutionDef) XXX_Size() int { + return xxx_messageInfo_CriticalSectionExecutionDef.Size(m) +} +func (m *CriticalSectionExecutionDef) XXX_DiscardUnknown() { + xxx_messageInfo_CriticalSectionExecutionDef.DiscardUnknown(m) +} + +var xxx_messageInfo_CriticalSectionExecutionDef proto.InternalMessageInfo + +func (m *CriticalSectionExecutionDef) GetExecuteInCriticalSectionName() string { + if m != nil { + return m.ExecuteInCriticalSectionName + } + return "" +} + +func (m *CriticalSectionExecutionDef) GetExclusiveResourceAccess() bool { + if m != nil { + return m.ExclusiveResourceAccess + } + return false +} + +func init() { + proto.RegisterType((*CriticalSectionDef)(nil), "tensorflow.CriticalSectionDef") + proto.RegisterType((*CriticalSectionExecutionDef)(nil), "tensorflow.CriticalSectionExecutionDef") +} + +func init() { + 
proto.RegisterFile("tensorflow/core/protobuf/critical_section.proto", fileDescriptor_d30d8be90fd098b9) +} + +var fileDescriptor_d30d8be90fd098b9 = []byte{ + // 249 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0x31, 0x4f, 0xc3, 0x30, + 0x10, 0x85, 0x65, 0x06, 0x04, 0x1e, 0x83, 0x2a, 0x82, 0x60, 0x88, 0x3a, 0x75, 0x4a, 0x24, 0xd8, + 0x10, 0x0b, 0x2d, 0x20, 0x58, 0x50, 0x15, 0x36, 0x16, 0xcb, 0x39, 0x5d, 0x82, 0x45, 0xe2, 0x43, + 0x67, 0x9b, 0xf6, 0xef, 0xf0, 0x2f, 0x19, 0x11, 0x26, 0x50, 0x12, 0x50, 0x37, 0x9f, 0xde, 0xf7, + 0xde, 0xc9, 0xef, 0x64, 0xe1, 0xd1, 0x3a, 0xe2, 0xba, 0xa5, 0x55, 0x01, 0xc4, 0x58, 0xbc, 0x30, + 0x79, 0xaa, 0x42, 0x5d, 0x00, 0x1b, 0x6f, 0x40, 0xb7, 0xca, 0x21, 0x78, 0x43, 0x36, 0x8f, 0x4a, + 0x22, 0x37, 0x86, 0xe9, 0xad, 0x4c, 0x16, 0x3d, 0xf5, 0xf0, 0x05, 0x5d, 0x61, 0x9d, 0x9c, 0xca, + 0xc9, 0xd8, 0xab, 0xac, 0xee, 0x30, 0x15, 0x99, 0x98, 0xed, 0x97, 0x07, 0x30, 0xb4, 0xdc, 0xeb, + 0x0e, 0xa7, 0x6f, 0x42, 0x1e, 0x8f, 0xa2, 0xae, 0xd7, 0x08, 0xe1, 0x3b, 0xf3, 0x46, 0x66, 0x18, + 0x67, 0x54, 0xc6, 0xaa, 0x6d, 0xf1, 0x27, 0x3d, 0x77, 0x67, 0x17, 0x7f, 0xf7, 0x24, 0xe7, 0xf2, + 0x08, 0xd7, 0xd0, 0x06, 0x67, 0x5e, 0x51, 0x31, 0x3a, 0x0a, 0x0c, 0xa8, 0x34, 0x00, 0x3a, 0x97, + 0xee, 0x64, 0x62, 0xb6, 0x57, 0x1e, 0xfe, 0x00, 0x65, 0xaf, 0x5f, 0x46, 0x79, 0xee, 0x65, 0x4a, + 0xdc, 0xe4, 0x9b, 0xff, 0xe7, 0x35, 0xeb, 0x0e, 0x57, 0xc4, 0xcf, 0xf3, 0xc9, 0x68, 0xd9, 0xf2, + 0xb3, 0x2b, 0xb7, 0x14, 0x8f, 0x17, 0x8d, 0xf1, 0x4f, 0xa1, 0xca, 0x81, 0xba, 0xdf, 0x55, 0xff, + 0xff, 0x6c, 0x68, 0x78, 0x83, 0x77, 0x21, 0xaa, 0xdd, 0x38, 0x9c, 0x7d, 0x04, 0x00, 0x00, 0xff, + 0xff, 0xf4, 0xdd, 0x9c, 0x03, 0xa9, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/critical_section.proto b/executor/proto/tensorflow/core/protobuf/critical_section.proto new file mode 100644 index 0000000000..7954e7ba87 --- /dev/null +++ 
b/executor/proto/tensorflow/core/protobuf/critical_section.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "CriticalSectionProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; + +// Protocol buffer representing a CriticalSection. +message CriticalSectionDef { + // Name of the critical section handle. + string critical_section_name = 1; +} + +// Protocol buffer representing a CriticalSection execution. +message CriticalSectionExecutionDef { + // Name of the critical section handle. + string execute_in_critical_section_name = 1; + // Whether this operation requires exclusive access to its resources, + // (i.e., no other CriticalSections may request the same resources). + bool exclusive_resource_access = 2; +} diff --git a/executor/proto/tensorflow/core/protobuf/data/experimental/snapshot.proto b/executor/proto/tensorflow/core/protobuf/data/experimental/snapshot.proto new file mode 100644 index 0000000000..422602d376 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/data/experimental/snapshot.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package tensorflow.data.experimental; + +import "tensorflow/core/framework/tensor.proto"; + +// Each SnapshotRecord represents one batch of pre-processed input data. A batch +// consists of a list of tensors that we encode as TensorProtos. This message +// doesn't store the structure of the batch. +message SnapshotRecord { + repeated .tensorflow.TensorProto tensor = 1; +} + +// This stores the metadata information present in each snapshot record. 
+message SnapshotMetadataRecord { + string graph_hash = 1; + string run_id = 2; + int64 creation_timestamp = 3; + + bool finalized = 1000; +} diff --git a/executor/proto/tensorflow/core/protobuf/debug.pb.go b/executor/proto/tensorflow/core/protobuf/debug.pb.go new file mode 100644 index 0000000000..1b646b467c --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/debug.pb.go @@ -0,0 +1,352 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/debug.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Option for watching a node in TensorFlow Debugger (tfdbg). +type DebugTensorWatch struct { + // Name of the node to watch. + // Use "*" for wildcard. But note: currently, regex is not supported in + // general. + NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // Output slot to watch. + // The semantics of output_slot == -1 is that all outputs of the node + // will be watched (i.e., a wildcard). + // Other negative values of output_slot are invalid and will lead to + // errors currently. + OutputSlot int32 `protobuf:"varint,2,opt,name=output_slot,json=outputSlot,proto3" json:"output_slot,omitempty"` + // Name(s) of the debugging op(s). + // One or more than one probes on a tensor. 
+ // e.g., {"DebugIdentity", "DebugNanCount"} + DebugOps []string `protobuf:"bytes,3,rep,name=debug_ops,json=debugOps,proto3" json:"debug_ops,omitempty"` + // URL(s) for debug targets(s). + // + // Supported URL formats are: + // - file:///foo/tfdbg_dump: Writes out Event content to file + // /foo/tfdbg_dump. Assumes all directories can be created if they don't + // already exist. + // - grpc://localhost:11011: Sends an RPC request to an EventListener + // service running at localhost:11011 with the event. + // - memcbk:///event_key: Routes tensors to clients using the + // callback registered with the DebugCallbackRegistry for event_key. + // + // Each debug op listed in debug_ops will publish its output tensor (debug + // signal) to all URLs in debug_urls. + // + // N.B. Session::Run() supports concurrent invocations of the same inputs + // (feed keys), outputs and target nodes. If such concurrent invocations + // are to be debugged, the callers of Session::Run() must use distinct + // debug_urls to make sure that the streamed or dumped events do not overlap + // among the invocations. + // TODO(cais): More visible documentation of this in g3docs. + DebugUrls []string `protobuf:"bytes,4,rep,name=debug_urls,json=debugUrls,proto3" json:"debug_urls,omitempty"` + // Do not error out if debug op creation fails (e.g., due to dtype + // incompatibility). Instead, just log the failure. 
+ TolerateDebugOpCreationFailures bool `protobuf:"varint,5,opt,name=tolerate_debug_op_creation_failures,json=tolerateDebugOpCreationFailures,proto3" json:"tolerate_debug_op_creation_failures,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DebugTensorWatch) Reset() { *m = DebugTensorWatch{} } +func (m *DebugTensorWatch) String() string { return proto.CompactTextString(m) } +func (*DebugTensorWatch) ProtoMessage() {} +func (*DebugTensorWatch) Descriptor() ([]byte, []int) { + return fileDescriptor_4fbf764b7c91eef6, []int{0} +} + +func (m *DebugTensorWatch) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DebugTensorWatch.Unmarshal(m, b) +} +func (m *DebugTensorWatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DebugTensorWatch.Marshal(b, m, deterministic) +} +func (m *DebugTensorWatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_DebugTensorWatch.Merge(m, src) +} +func (m *DebugTensorWatch) XXX_Size() int { + return xxx_messageInfo_DebugTensorWatch.Size(m) +} +func (m *DebugTensorWatch) XXX_DiscardUnknown() { + xxx_messageInfo_DebugTensorWatch.DiscardUnknown(m) +} + +var xxx_messageInfo_DebugTensorWatch proto.InternalMessageInfo + +func (m *DebugTensorWatch) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +func (m *DebugTensorWatch) GetOutputSlot() int32 { + if m != nil { + return m.OutputSlot + } + return 0 +} + +func (m *DebugTensorWatch) GetDebugOps() []string { + if m != nil { + return m.DebugOps + } + return nil +} + +func (m *DebugTensorWatch) GetDebugUrls() []string { + if m != nil { + return m.DebugUrls + } + return nil +} + +func (m *DebugTensorWatch) GetTolerateDebugOpCreationFailures() bool { + if m != nil { + return m.TolerateDebugOpCreationFailures + } + return false +} + +// Options for initializing DebuggerState in TensorFlow Debugger (tfdbg). 
+type DebugOptions struct { + // Debugging options + DebugTensorWatchOpts []*DebugTensorWatch `protobuf:"bytes,4,rep,name=debug_tensor_watch_opts,json=debugTensorWatchOpts,proto3" json:"debug_tensor_watch_opts,omitempty"` + // Caller-specified global step count. + // Note that this is distinct from the session run count and the executor + // step count. + GlobalStep int64 `protobuf:"varint,10,opt,name=global_step,json=globalStep,proto3" json:"global_step,omitempty"` + // Whether the total disk usage of tfdbg is to be reset to zero + // in this Session.run call. This is used by wrappers and hooks + // such as the local CLI ones to indicate that the dumped tensors + // are cleaned up from the disk after each Session.run. + ResetDiskByteUsage bool `protobuf:"varint,11,opt,name=reset_disk_byte_usage,json=resetDiskByteUsage,proto3" json:"reset_disk_byte_usage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DebugOptions) Reset() { *m = DebugOptions{} } +func (m *DebugOptions) String() string { return proto.CompactTextString(m) } +func (*DebugOptions) ProtoMessage() {} +func (*DebugOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_4fbf764b7c91eef6, []int{1} +} + +func (m *DebugOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DebugOptions.Unmarshal(m, b) +} +func (m *DebugOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DebugOptions.Marshal(b, m, deterministic) +} +func (m *DebugOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DebugOptions.Merge(m, src) +} +func (m *DebugOptions) XXX_Size() int { + return xxx_messageInfo_DebugOptions.Size(m) +} +func (m *DebugOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DebugOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DebugOptions proto.InternalMessageInfo + +func (m *DebugOptions) GetDebugTensorWatchOpts() []*DebugTensorWatch { + if m != nil { + return 
m.DebugTensorWatchOpts + } + return nil +} + +func (m *DebugOptions) GetGlobalStep() int64 { + if m != nil { + return m.GlobalStep + } + return 0 +} + +func (m *DebugOptions) GetResetDiskByteUsage() bool { + if m != nil { + return m.ResetDiskByteUsage + } + return false +} + +type DebuggedSourceFile struct { + // The host name on which a source code file is located. + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // Path to the source code file. + FilePath string `protobuf:"bytes,2,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"` + // The timestamp at which the source code file is last modified. + LastModified int64 `protobuf:"varint,3,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` + // Byte size of the file. + Bytes int64 `protobuf:"varint,4,opt,name=bytes,proto3" json:"bytes,omitempty"` + // Line-by-line content of the source code file. + Lines []string `protobuf:"bytes,5,rep,name=lines,proto3" json:"lines,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DebuggedSourceFile) Reset() { *m = DebuggedSourceFile{} } +func (m *DebuggedSourceFile) String() string { return proto.CompactTextString(m) } +func (*DebuggedSourceFile) ProtoMessage() {} +func (*DebuggedSourceFile) Descriptor() ([]byte, []int) { + return fileDescriptor_4fbf764b7c91eef6, []int{2} +} + +func (m *DebuggedSourceFile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DebuggedSourceFile.Unmarshal(m, b) +} +func (m *DebuggedSourceFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DebuggedSourceFile.Marshal(b, m, deterministic) +} +func (m *DebuggedSourceFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_DebuggedSourceFile.Merge(m, src) +} +func (m *DebuggedSourceFile) XXX_Size() int { + return xxx_messageInfo_DebuggedSourceFile.Size(m) +} +func (m *DebuggedSourceFile) 
XXX_DiscardUnknown() { + xxx_messageInfo_DebuggedSourceFile.DiscardUnknown(m) +} + +var xxx_messageInfo_DebuggedSourceFile proto.InternalMessageInfo + +func (m *DebuggedSourceFile) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *DebuggedSourceFile) GetFilePath() string { + if m != nil { + return m.FilePath + } + return "" +} + +func (m *DebuggedSourceFile) GetLastModified() int64 { + if m != nil { + return m.LastModified + } + return 0 +} + +func (m *DebuggedSourceFile) GetBytes() int64 { + if m != nil { + return m.Bytes + } + return 0 +} + +func (m *DebuggedSourceFile) GetLines() []string { + if m != nil { + return m.Lines + } + return nil +} + +type DebuggedSourceFiles struct { + // A collection of source code files. + SourceFiles []*DebuggedSourceFile `protobuf:"bytes,1,rep,name=source_files,json=sourceFiles,proto3" json:"source_files,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DebuggedSourceFiles) Reset() { *m = DebuggedSourceFiles{} } +func (m *DebuggedSourceFiles) String() string { return proto.CompactTextString(m) } +func (*DebuggedSourceFiles) ProtoMessage() {} +func (*DebuggedSourceFiles) Descriptor() ([]byte, []int) { + return fileDescriptor_4fbf764b7c91eef6, []int{3} +} + +func (m *DebuggedSourceFiles) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DebuggedSourceFiles.Unmarshal(m, b) +} +func (m *DebuggedSourceFiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DebuggedSourceFiles.Marshal(b, m, deterministic) +} +func (m *DebuggedSourceFiles) XXX_Merge(src proto.Message) { + xxx_messageInfo_DebuggedSourceFiles.Merge(m, src) +} +func (m *DebuggedSourceFiles) XXX_Size() int { + return xxx_messageInfo_DebuggedSourceFiles.Size(m) +} +func (m *DebuggedSourceFiles) XXX_DiscardUnknown() { + xxx_messageInfo_DebuggedSourceFiles.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DebuggedSourceFiles proto.InternalMessageInfo + +func (m *DebuggedSourceFiles) GetSourceFiles() []*DebuggedSourceFile { + if m != nil { + return m.SourceFiles + } + return nil +} + +func init() { + proto.RegisterType((*DebugTensorWatch)(nil), "tensorflow.DebugTensorWatch") + proto.RegisterType((*DebugOptions)(nil), "tensorflow.DebugOptions") + proto.RegisterType((*DebuggedSourceFile)(nil), "tensorflow.DebuggedSourceFile") + proto.RegisterType((*DebuggedSourceFiles)(nil), "tensorflow.DebuggedSourceFiles") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/debug.proto", fileDescriptor_4fbf764b7c91eef6) +} + +var fileDescriptor_4fbf764b7c91eef6 = []byte{ + // 483 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0xb5, 0xa4, 0x45, 0xed, 0x24, 0x48, 0x68, 0x29, 0xc2, 0x12, 0x1f, 0x8d, 0x52, 0x0e, + 0x39, 0x25, 0x02, 0xae, 0x5c, 0x08, 0x51, 0x4f, 0x40, 0x23, 0x87, 0x0a, 0xc4, 0x65, 0xb5, 0x8e, + 0xc7, 0x8e, 0xc9, 0x26, 0x63, 0xed, 0x8c, 0x15, 0xf5, 0x45, 0x78, 0x15, 0xde, 0x85, 0x27, 0xe1, + 0x88, 0x76, 0x9d, 0x2a, 0xfd, 0xe0, 0xe6, 0xf9, 0xfd, 0x67, 0x66, 0x67, 0xff, 0xb3, 0x86, 0xd7, + 0x82, 0x1b, 0x26, 0x5f, 0x38, 0xda, 0x8e, 0x17, 0xe4, 0x71, 0x5c, 0x7b, 0x12, 0xca, 0x9a, 0x62, + 0x9c, 0x63, 0xd6, 0x94, 0xa3, 0x18, 0x6a, 0xd8, 0x67, 0x0d, 0xfe, 0x28, 0x78, 0x3c, 0x0d, 0xda, + 0xd7, 0xc8, 0xbe, 0x59, 0x59, 0x2c, 0xf5, 0x73, 0x38, 0xde, 0x50, 0x8e, 0x66, 0x63, 0xd7, 0x98, + 0xa8, 0xbe, 0x1a, 0x1e, 0xa7, 0x47, 0x01, 0x7c, 0xb1, 0x6b, 0xd4, 0xa7, 0xd0, 0xa5, 0x46, 0xea, + 0x46, 0x0c, 0x3b, 0x92, 0xe4, 0x41, 0x5f, 0x0d, 0x0f, 0x53, 0x68, 0xd1, 0xdc, 0x91, 0x84, 0xea, + 0x78, 0x9a, 0xa1, 0x9a, 0x93, 0x4e, 0xbf, 0x13, 0xaa, 0x23, 0xb8, 0xa8, 0x59, 0xbf, 0x04, 0x68, + 0xc5, 0xc6, 0x3b, 0x4e, 0x0e, 0xa2, 0xda, 0xa6, 0x5f, 0x7a, 0xc7, 0xfa, 0x13, 0x9c, 0x09, 0x39, + 0xf4, 0x56, 0xd0, 0x5c, 0x37, 0x31, 0x0b, 0x8f, 0x56, 0x2a, 0xda, 0x98, 0xc2, 
0x56, 0xae, 0xf1, + 0xc8, 0xc9, 0x61, 0x5f, 0x0d, 0x8f, 0xd2, 0xd3, 0xeb, 0xd4, 0x69, 0xdb, 0xfd, 0xe3, 0x2e, 0xef, + 0x7c, 0x97, 0x36, 0xf8, 0xad, 0xa0, 0xb7, 0xd3, 0x02, 0x67, 0x3d, 0x87, 0x67, 0x6d, 0xd7, 0xd6, + 0x01, 0xb3, 0x0d, 0xd7, 0x35, 0x54, 0x4b, 0x3b, 0x4a, 0xf7, 0xed, 0x8b, 0xd1, 0xde, 0x9b, 0xd1, + 0x5d, 0x5f, 0xd2, 0x93, 0xfc, 0x0e, 0xb9, 0xa8, 0x85, 0x83, 0x21, 0xa5, 0xa3, 0xcc, 0x3a, 0xc3, + 0x82, 0x75, 0x02, 0x7d, 0x35, 0xec, 0xa4, 0xd0, 0xa2, 0xb9, 0x60, 0xad, 0xdf, 0xc0, 0x53, 0x8f, + 0x8c, 0x62, 0xf2, 0x8a, 0x57, 0x26, 0xbb, 0x12, 0x34, 0x0d, 0xdb, 0x12, 0x93, 0x6e, 0xbc, 0x86, + 0x8e, 0xe2, 0xb4, 0xe2, 0xd5, 0xe4, 0x4a, 0xf0, 0x32, 0x28, 0x83, 0x5f, 0x0a, 0x74, 0x3c, 0xbe, + 0xc4, 0x7c, 0x4e, 0x8d, 0x5f, 0xe0, 0x79, 0xe5, 0x50, 0x6b, 0x38, 0x58, 0x12, 0xcb, 0x6e, 0x27, + 0xf1, 0x3b, 0xd8, 0x5d, 0x54, 0x0e, 0x4d, 0x6d, 0x65, 0x19, 0xb7, 0x71, 0x9c, 0x1e, 0x05, 0x30, + 0xb3, 0xb2, 0xd4, 0x67, 0xf0, 0xc8, 0x59, 0x16, 0xb3, 0xa6, 0xbc, 0x2a, 0x2a, 0xcc, 0x93, 0x4e, + 0x9c, 0xae, 0x17, 0xe0, 0xe7, 0x1d, 0xd3, 0x27, 0x70, 0x18, 0x86, 0x0a, 0x1e, 0x04, 0xb1, 0x0d, + 0x02, 0x75, 0xd5, 0x26, 0x9a, 0x1d, 0x96, 0xd4, 0x06, 0x83, 0xef, 0xf0, 0xe4, 0xfe, 0x5c, 0xac, + 0x3f, 0x40, 0x8f, 0x63, 0x68, 0xc2, 0xd1, 0x9c, 0xa8, 0xe8, 0xe6, 0xab, 0x7b, 0x6e, 0xde, 0x2a, + 0x4b, 0xbb, 0xbc, 0x6f, 0x31, 0xf9, 0x09, 0x09, 0xf9, 0xf2, 0x66, 0x45, 0xe1, 0xed, 0x1a, 0xb7, + 0xe4, 0x57, 0x93, 0x6e, 0x2c, 0x9e, 0x85, 0xd7, 0xcb, 0x33, 0xf5, 0xe3, 0x7d, 0x59, 0xc9, 0xb2, + 0xc9, 0x46, 0x0b, 0x5a, 0x8f, 0x6f, 0xbc, 0xf8, 0xff, 0x7f, 0x96, 0x74, 0xfb, 0x57, 0xf8, 0xab, + 0x54, 0xf6, 0x30, 0x06, 0xef, 0xfe, 0x05, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x4e, 0xc2, 0xa9, 0x30, + 0x03, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/debug.proto b/executor/proto/tensorflow/core/protobuf/debug.proto new file mode 100644 index 0000000000..3cfab170f0 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/debug.proto @@ -0,0 +1,93 @@ +syntax = "proto3"; + +package 
tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "DebugProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; + +// Option for watching a node in TensorFlow Debugger (tfdbg). +message DebugTensorWatch { + // Name of the node to watch. + // Use "*" for wildcard. But note: currently, regex is not supported in + // general. + string node_name = 1; + + // Output slot to watch. + // The semantics of output_slot == -1 is that all outputs of the node + // will be watched (i.e., a wildcard). + // Other negative values of output_slot are invalid and will lead to + // errors currently. + int32 output_slot = 2; + + // Name(s) of the debugging op(s). + // One or more than one probes on a tensor. + // e.g., {"DebugIdentity", "DebugNanCount"} + repeated string debug_ops = 3; + + // URL(s) for debug targets(s). + // + // Supported URL formats are: + // - file:///foo/tfdbg_dump: Writes out Event content to file + // /foo/tfdbg_dump. Assumes all directories can be created if they don't + // already exist. + // - grpc://localhost:11011: Sends an RPC request to an EventListener + // service running at localhost:11011 with the event. + // - memcbk:///event_key: Routes tensors to clients using the + // callback registered with the DebugCallbackRegistry for event_key. + // + // Each debug op listed in debug_ops will publish its output tensor (debug + // signal) to all URLs in debug_urls. + // + // N.B. Session::Run() supports concurrent invocations of the same inputs + // (feed keys), outputs and target nodes. If such concurrent invocations + // are to be debugged, the callers of Session::Run() must use distinct + // debug_urls to make sure that the streamed or dumped events do not overlap + // among the invocations. + // TODO(cais): More visible documentation of this in g3docs. 
+ repeated string debug_urls = 4; + + // Do not error out if debug op creation fails (e.g., due to dtype + // incompatibility). Instead, just log the failure. + bool tolerate_debug_op_creation_failures = 5; +} + +// Options for initializing DebuggerState in TensorFlow Debugger (tfdbg). +message DebugOptions { + // Debugging options + repeated DebugTensorWatch debug_tensor_watch_opts = 4; + + // Caller-specified global step count. + // Note that this is distinct from the session run count and the executor + // step count. + int64 global_step = 10; + + // Whether the total disk usage of tfdbg is to be reset to zero + // in this Session.run call. This is used by wrappers and hooks + // such as the local CLI ones to indicate that the dumped tensors + // are cleaned up from the disk after each Session.run. + bool reset_disk_byte_usage = 11; +} + +message DebuggedSourceFile { + // The host name on which a source code file is located. + string host = 1; + + // Path to the source code file. + string file_path = 2; + + // The timestamp at which the source code file is last modified. + int64 last_modified = 3; + + // Byte size of the file. + int64 bytes = 4; + + // Line-by-line content of the source code file. + repeated string lines = 5; +} + +message DebuggedSourceFiles { + // A collection of source code files. + repeated DebuggedSourceFile source_files = 1; +} diff --git a/executor/proto/tensorflow/core/protobuf/device_properties.pb.go b/executor/proto/tensorflow/core/protobuf/device_properties.pb.go new file mode 100644 index 0000000000..36fb547e25 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/device_properties.pb.go @@ -0,0 +1,261 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/device_properties.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type DeviceProperties struct { + // Device type (CPU, GPU, ...) + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Vendor (Intel, nvidia, ...) + Vendor string `protobuf:"bytes,2,opt,name=vendor,proto3" json:"vendor,omitempty"` + // Model (Haswell, K40, ...) + Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"` + // Core Frequency in Mhz + Frequency int64 `protobuf:"varint,4,opt,name=frequency,proto3" json:"frequency,omitempty"` + // Number of cores + NumCores int64 `protobuf:"varint,5,opt,name=num_cores,json=numCores,proto3" json:"num_cores,omitempty"` + // Version of the tools and libraries used with this device (e.g. gcc 4.9, + // cudnn 5.1) + Environment map[string]string `protobuf:"bytes,6,rep,name=environment,proto3" json:"environment,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Number of registers per core. + NumRegisters int64 `protobuf:"varint,7,opt,name=num_registers,json=numRegisters,proto3" json:"num_registers,omitempty"` + // L1 cache size in bytes + L1CacheSize int64 `protobuf:"varint,8,opt,name=l1_cache_size,json=l1CacheSize,proto3" json:"l1_cache_size,omitempty"` + // L2 cache size in bytes + L2CacheSize int64 `protobuf:"varint,9,opt,name=l2_cache_size,json=l2CacheSize,proto3" json:"l2_cache_size,omitempty"` + // L3 cache size in bytes + L3CacheSize int64 `protobuf:"varint,10,opt,name=l3_cache_size,json=l3CacheSize,proto3" json:"l3_cache_size,omitempty"` + // Shared memory size per multiprocessor in bytes. This field is + // applicable to GPUs only. 
+ SharedMemorySizePerMultiprocessor int64 `protobuf:"varint,11,opt,name=shared_memory_size_per_multiprocessor,json=sharedMemorySizePerMultiprocessor,proto3" json:"shared_memory_size_per_multiprocessor,omitempty"` + // Memory size in bytes + MemorySize int64 `protobuf:"varint,12,opt,name=memory_size,json=memorySize,proto3" json:"memory_size,omitempty"` + // Memory bandwidth in KB/s + Bandwidth int64 `protobuf:"varint,13,opt,name=bandwidth,proto3" json:"bandwidth,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceProperties) Reset() { *m = DeviceProperties{} } +func (m *DeviceProperties) String() string { return proto.CompactTextString(m) } +func (*DeviceProperties) ProtoMessage() {} +func (*DeviceProperties) Descriptor() ([]byte, []int) { + return fileDescriptor_07c4fdb3c62f9732, []int{0} +} + +func (m *DeviceProperties) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceProperties.Unmarshal(m, b) +} +func (m *DeviceProperties) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceProperties.Marshal(b, m, deterministic) +} +func (m *DeviceProperties) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceProperties.Merge(m, src) +} +func (m *DeviceProperties) XXX_Size() int { + return xxx_messageInfo_DeviceProperties.Size(m) +} +func (m *DeviceProperties) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceProperties.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceProperties proto.InternalMessageInfo + +func (m *DeviceProperties) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *DeviceProperties) GetVendor() string { + if m != nil { + return m.Vendor + } + return "" +} + +func (m *DeviceProperties) GetModel() string { + if m != nil { + return m.Model + } + return "" +} + +func (m *DeviceProperties) GetFrequency() int64 { + if m != nil { + return m.Frequency + } + return 0 +} + +func (m 
*DeviceProperties) GetNumCores() int64 { + if m != nil { + return m.NumCores + } + return 0 +} + +func (m *DeviceProperties) GetEnvironment() map[string]string { + if m != nil { + return m.Environment + } + return nil +} + +func (m *DeviceProperties) GetNumRegisters() int64 { + if m != nil { + return m.NumRegisters + } + return 0 +} + +func (m *DeviceProperties) GetL1CacheSize() int64 { + if m != nil { + return m.L1CacheSize + } + return 0 +} + +func (m *DeviceProperties) GetL2CacheSize() int64 { + if m != nil { + return m.L2CacheSize + } + return 0 +} + +func (m *DeviceProperties) GetL3CacheSize() int64 { + if m != nil { + return m.L3CacheSize + } + return 0 +} + +func (m *DeviceProperties) GetSharedMemorySizePerMultiprocessor() int64 { + if m != nil { + return m.SharedMemorySizePerMultiprocessor + } + return 0 +} + +func (m *DeviceProperties) GetMemorySize() int64 { + if m != nil { + return m.MemorySize + } + return 0 +} + +func (m *DeviceProperties) GetBandwidth() int64 { + if m != nil { + return m.Bandwidth + } + return 0 +} + +type NamedDevice struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Properties *DeviceProperties `protobuf:"bytes,2,opt,name=properties,proto3" json:"properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedDevice) Reset() { *m = NamedDevice{} } +func (m *NamedDevice) String() string { return proto.CompactTextString(m) } +func (*NamedDevice) ProtoMessage() {} +func (*NamedDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_07c4fdb3c62f9732, []int{1} +} + +func (m *NamedDevice) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedDevice.Unmarshal(m, b) +} +func (m *NamedDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedDevice.Marshal(b, m, deterministic) +} +func (m *NamedDevice) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_NamedDevice.Merge(m, src) +} +func (m *NamedDevice) XXX_Size() int { + return xxx_messageInfo_NamedDevice.Size(m) +} +func (m *NamedDevice) XXX_DiscardUnknown() { + xxx_messageInfo_NamedDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedDevice proto.InternalMessageInfo + +func (m *NamedDevice) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedDevice) GetProperties() *DeviceProperties { + if m != nil { + return m.Properties + } + return nil +} + +func init() { + proto.RegisterType((*DeviceProperties)(nil), "tensorflow.DeviceProperties") + proto.RegisterMapType((map[string]string)(nil), "tensorflow.DeviceProperties.EnvironmentEntry") + proto.RegisterType((*NamedDevice)(nil), "tensorflow.NamedDevice") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/device_properties.proto", fileDescriptor_07c4fdb3c62f9732) +} + +var fileDescriptor_07c4fdb3c62f9732 = []byte{ + // 453 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe5, 0x26, 0x0d, 0xcd, 0xb8, 0x91, 0xa2, 0x15, 0xaa, 0x56, 0x50, 0x89, 0x10, 0x84, + 0x94, 0x0b, 0x0e, 0x4d, 0x2e, 0x08, 0x55, 0x1c, 0x5a, 0x7a, 0x2c, 0x44, 0xe6, 0x04, 0x17, 0xcb, + 0xb1, 0x27, 0xc9, 0x0a, 0xef, 0xae, 0xd9, 0x5d, 0xa7, 0x72, 0x1f, 0x8f, 0xa7, 0xe2, 0x88, 0x76, + 0x6d, 0x6c, 0x27, 0x42, 0xbd, 0xcd, 0xfc, 0xf3, 0xcd, 0x78, 0xe4, 0xf9, 0x17, 0xde, 0x1b, 0x14, + 0x5a, 0xaa, 0x4d, 0x26, 0x1f, 0xe6, 0x89, 0x54, 0x38, 0xcf, 0x95, 0x34, 0x72, 0x5d, 0x6c, 0xe6, + 0x29, 0xee, 0x59, 0x82, 0x51, 0xae, 0x64, 0x8e, 0xca, 0x30, 0xd4, 0x81, 0x2b, 0x11, 0x68, 0x3b, + 0xa6, 0xbf, 0xfb, 0x30, 0xfe, 0xec, 0xb8, 0x55, 0x83, 0x11, 0x02, 0x7d, 0x53, 0xe6, 0x48, 0xbd, + 0x89, 0x37, 0x1b, 0x86, 0x2e, 0x26, 0x17, 0x30, 0xd8, 0xa3, 0x48, 0xa5, 0xa2, 0x27, 0x4e, 0xad, + 0x33, 0xf2, 0x1c, 0x4e, 0xb9, 0x4c, 0x31, 0xa3, 0x3d, 0x27, 0x57, 0x09, 0xb9, 0x84, 0xe1, 0x46, + 0xe1, 0xaf, 0x02, 0x45, 
0x52, 0xd2, 0xfe, 0xc4, 0x9b, 0xf5, 0xc2, 0x56, 0x20, 0x2f, 0x61, 0x28, + 0x0a, 0x1e, 0xd9, 0x6d, 0x35, 0x3d, 0x75, 0xd5, 0x33, 0x51, 0xf0, 0x5b, 0x9b, 0x93, 0xaf, 0xe0, + 0xa3, 0xd8, 0x33, 0x25, 0x05, 0x47, 0x61, 0xe8, 0x60, 0xd2, 0x9b, 0xf9, 0x8b, 0x77, 0x41, 0xbb, + 0x73, 0x70, 0xbc, 0x6f, 0x70, 0xd7, 0xf2, 0x77, 0xc2, 0xa8, 0x32, 0xec, 0x4e, 0x20, 0x6f, 0x60, + 0x64, 0xbf, 0xa6, 0x70, 0xcb, 0xb4, 0x41, 0xa5, 0xe9, 0x33, 0xf7, 0xc5, 0x73, 0x51, 0xf0, 0xf0, + 0x9f, 0x46, 0xa6, 0x30, 0xca, 0xae, 0xa2, 0x24, 0x4e, 0x76, 0x18, 0x69, 0xf6, 0x88, 0xf4, 0xcc, + 0x41, 0x7e, 0x76, 0x75, 0x6b, 0xb5, 0x6f, 0xec, 0x11, 0x1d, 0xb3, 0xe8, 0x32, 0xc3, 0x9a, 0x59, + 0x1c, 0x32, 0xcb, 0x2e, 0x03, 0x35, 0xb3, 0x6c, 0x99, 0x15, 0xbc, 0xd5, 0xbb, 0x58, 0x61, 0x1a, + 0x71, 0xe4, 0x52, 0x95, 0x0e, 0x8c, 0x72, 0x54, 0x11, 0x2f, 0x32, 0xc3, 0x72, 0x25, 0x13, 0xd4, + 0x5a, 0x2a, 0xea, 0xbb, 0xde, 0xd7, 0x15, 0x7c, 0xef, 0x58, 0x3b, 0x60, 0x85, 0xea, 0xfe, 0x00, + 0x24, 0xaf, 0xc0, 0xef, 0x8c, 0xa2, 0xe7, 0xae, 0x0f, 0x78, 0xd3, 0x61, 0xef, 0xb1, 0x8e, 0x45, + 0xfa, 0xc0, 0x52, 0xb3, 0xa3, 0xa3, 0xea, 0x1e, 0x8d, 0xf0, 0xe2, 0x13, 0x8c, 0x8f, 0x7f, 0x21, + 0x19, 0x43, 0xef, 0x27, 0x96, 0xb5, 0x05, 0x6c, 0x68, 0x2f, 0xbd, 0x8f, 0xb3, 0x02, 0x6b, 0x03, + 0x54, 0xc9, 0xc7, 0x93, 0x0f, 0xde, 0x34, 0x02, 0xff, 0x4b, 0xcc, 0x31, 0xad, 0x0e, 0x63, 0xed, + 0x23, 0x62, 0xde, 0xd8, 0xc7, 0xc6, 0xe4, 0x1a, 0xa0, 0xf5, 0xa1, 0x9b, 0xe0, 0x2f, 0x2e, 0x9f, + 0x3a, 0x6a, 0xd8, 0xe1, 0x6f, 0xbe, 0xdf, 0x5c, 0x1c, 0xd7, 0x57, 0xd6, 0xca, 0xfa, 0xc7, 0xf5, + 0x96, 0x99, 0x5d, 0xb1, 0x0e, 0x12, 0xc9, 0xe7, 0x9d, 0x87, 0xf0, 0xff, 0x70, 0x2b, 0x0f, 0x5f, + 0xc8, 0x1f, 0xcf, 0x5b, 0x0f, 0x5c, 0xb2, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x51, 0x31, 0x66, + 0x78, 0x47, 0x03, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/device_properties.proto b/executor/proto/tensorflow/core/protobuf/device_properties.proto new file mode 100644 index 0000000000..11e1258e75 --- /dev/null +++ 
b/executor/proto/tensorflow/core/protobuf/device_properties.proto @@ -0,0 +1,57 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "DevicePropertiesProtos"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; + +message DeviceProperties { + // Device type (CPU, GPU, ...) + string type = 1; + // Vendor (Intel, nvidia, ...) + string vendor = 2; + // Model (Haswell, K40, ...) + string model = 3; + // Core Frequency in Mhz + int64 frequency = 4; + // Number of cores + int64 num_cores = 5; + // Version of the tools and libraries used with this device (e.g. gcc 4.9, + // cudnn 5.1) + map environment = 6; + // Number of registers per core. + int64 num_registers = 7; + // L1 cache size in bytes + int64 l1_cache_size = 8; + // L2 cache size in bytes + int64 l2_cache_size = 9; + // L3 cache size in bytes + int64 l3_cache_size = 10; + // Shared memory size per multiprocessor in bytes. This field is + // applicable to GPUs only. 
+ int64 shared_memory_size_per_multiprocessor = 11; + // Memory size in bytes + int64 memory_size = 12; + // Memory bandwidth in KB/s + int64 bandwidth = 13; +} + +message NamedDevice { + string name = 1; + DeviceProperties properties = 2; +} diff --git a/executor/proto/tensorflow/core/protobuf/dnn.pb.go b/executor/proto/tensorflow/core/protobuf/dnn.pb.go new file mode 100644 index 0000000000..69b91a2886 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/dnn.pb.go @@ -0,0 +1,580 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/stream_executor/dnn.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Specifies the data type used by an operation. +type DataType int32 + +const ( + DataType_kFloat DataType = 0 + DataType_kDouble DataType = 1 + DataType_kHalf DataType = 2 + DataType_kInt8 DataType = 3 + DataType_kInt32 DataType = 4 +) + +var DataType_name = map[int32]string{ + 0: "kFloat", + 1: "kDouble", + 2: "kHalf", + 3: "kInt8", + 4: "kInt32", +} + +var DataType_value = map[string]int32{ + "kFloat": 0, + "kDouble": 1, + "kHalf": 2, + "kInt8": 3, + "kInt32": 4, +} + +func (x DataType) String() string { + return proto.EnumName(DataType_name, int32(x)) +} + +func (DataType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_768c61f2a579ee6a, []int{0} +} + +// Describes how a convolution input or output layer's data is formatted. 
+type DataLayout int32 + +const ( + // Naming convention: + // Y <-> row or height + // X <-> column or width + // Batch <-> batch, or N + // Depth <-> feature, or channel + // TODO(timshen): turn them into cuDNN names, e.g. kNCHW. + DataLayout_kYXDepthBatch DataLayout = 0 + DataLayout_kYXBatchDepth DataLayout = 1 + DataLayout_kBatchYXDepth DataLayout = 2 + DataLayout_kBatchDepthYX DataLayout = 3 + DataLayout_kBatchDepthYX4 DataLayout = 4 +) + +var DataLayout_name = map[int32]string{ + 0: "kYXDepthBatch", + 1: "kYXBatchDepth", + 2: "kBatchYXDepth", + 3: "kBatchDepthYX", + 4: "kBatchDepthYX4", +} + +var DataLayout_value = map[string]int32{ + "kYXDepthBatch": 0, + "kYXBatchDepth": 1, + "kBatchYXDepth": 2, + "kBatchDepthYX": 3, + "kBatchDepthYX4": 4, +} + +func (x DataLayout) String() string { + return proto.EnumName(DataLayout_name, int32(x)) +} + +func (DataLayout) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_768c61f2a579ee6a, []int{1} +} + +// Describes how a convolution filter is laid out in the memory. +type FilterLayout int32 + +const ( + // Naming convention: + // Y <-> row or height + // X <-> column or width + // Output <-> output feature, or N + // Input <-> input feature, or N + // TODO(timshen): turn them into cuDNN names, e.g. kNCHW. 
+ FilterLayout_kOutputInputYX FilterLayout = 0 + FilterLayout_kOutputYXInput FilterLayout = 1 + FilterLayout_kOutputInputYX4 FilterLayout = 2 + FilterLayout_kInputYXOutput FilterLayout = 3 + FilterLayout_kYXInputOutput FilterLayout = 4 +) + +var FilterLayout_name = map[int32]string{ + 0: "kOutputInputYX", + 1: "kOutputYXInput", + 2: "kOutputInputYX4", + 3: "kInputYXOutput", + 4: "kYXInputOutput", +} + +var FilterLayout_value = map[string]int32{ + "kOutputInputYX": 0, + "kOutputYXInput": 1, + "kOutputInputYX4": 2, + "kInputYXOutput": 3, + "kYXInputOutput": 4, +} + +func (x FilterLayout) String() string { + return proto.EnumName(FilterLayout_name, int32(x)) +} + +func (FilterLayout) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_768c61f2a579ee6a, []int{2} +} + +// Describes a kind of non-linearity (threshold-like mathematical function). +type ActivationMode int32 + +const ( + ActivationMode_kNone ActivationMode = 0 + ActivationMode_kSigmoid ActivationMode = 1 + // Rectified linear activation: f(x) = x < 0 ? 0 : x + ActivationMode_kRelu ActivationMode = 2 + // Rectified linear activation; where upper maximum is 6.0. + ActivationMode_kRelu6 ActivationMode = 3 + // Rectified linear activation; where upper maximum specified by + // BatchDescriptor::value_max(). + ActivationMode_kReluX ActivationMode = 4 + ActivationMode_kTanh ActivationMode = 5 + // Like ReluX; but passes all values in the range [-X,X]. 
+ ActivationMode_kBandPass ActivationMode = 6 +) + +var ActivationMode_name = map[int32]string{ + 0: "kNone", + 1: "kSigmoid", + 2: "kRelu", + 3: "kRelu6", + 4: "kReluX", + 5: "kTanh", + 6: "kBandPass", +} + +var ActivationMode_value = map[string]int32{ + "kNone": 0, + "kSigmoid": 1, + "kRelu": 2, + "kRelu6": 3, + "kReluX": 4, + "kTanh": 5, + "kBandPass": 6, +} + +func (x ActivationMode) String() string { + return proto.EnumName(ActivationMode_name, int32(x)) +} + +func (ActivationMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_768c61f2a579ee6a, []int{3} +} + +// Describe the math definition for the conv op. The popular behavior is +// actually called cross-correlation in math, despite the operation is often +// referred as convolution. See cuDNN cudnnConvolutionMode_t. +type ConvolutionMode int32 + +const ( + ConvolutionMode_CROSS_CORRELATION ConvolutionMode = 0 + ConvolutionMode_CONVOLUTION ConvolutionMode = 1 +) + +var ConvolutionMode_name = map[int32]string{ + 0: "CROSS_CORRELATION", + 1: "CONVOLUTION", +} + +var ConvolutionMode_value = map[string]int32{ + "CROSS_CORRELATION": 0, + "CONVOLUTION": 1, +} + +func (x ConvolutionMode) String() string { + return proto.EnumName(ConvolutionMode_name, int32(x)) +} + +func (ConvolutionMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_768c61f2a579ee6a, []int{4} +} + +type ConvolutionKind int32 + +const ( + ConvolutionKind_INVALID ConvolutionKind = 0 + ConvolutionKind_FORWARD ConvolutionKind = 1 + ConvolutionKind_BACKWARD_FILTER ConvolutionKind = 2 + ConvolutionKind_BACKWARD_DATA ConvolutionKind = 3 + ConvolutionKind_FORWARD_BIAS_ACTIVATION ConvolutionKind = 4 +) + +var ConvolutionKind_name = map[int32]string{ + 0: "INVALID", + 1: "FORWARD", + 2: "BACKWARD_FILTER", + 3: "BACKWARD_DATA", + 4: "FORWARD_BIAS_ACTIVATION", +} + +var ConvolutionKind_value = map[string]int32{ + "INVALID": 0, + "FORWARD": 1, + "BACKWARD_FILTER": 2, + "BACKWARD_DATA": 3, + "FORWARD_BIAS_ACTIVATION": 4, +} + +func (x 
ConvolutionKind) String() string { + return proto.EnumName(ConvolutionKind_name, int32(x)) +} + +func (ConvolutionKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_768c61f2a579ee6a, []int{5} +} + +type AlgorithmProto_MathType int32 + +const ( + AlgorithmProto_DEFAULT_MATH AlgorithmProto_MathType = 0 + // The GPU may operate 4x4 matrix FMA. + // See cuDNN's documentation for CUDNN_TENSOR_OP_MATH. + AlgorithmProto_TENSOR_OP_MATH AlgorithmProto_MathType = 1 +) + +var AlgorithmProto_MathType_name = map[int32]string{ + 0: "DEFAULT_MATH", + 1: "TENSOR_OP_MATH", +} + +var AlgorithmProto_MathType_value = map[string]int32{ + "DEFAULT_MATH": 0, + "TENSOR_OP_MATH": 1, +} + +func (x AlgorithmProto_MathType) String() string { + return proto.EnumName(AlgorithmProto_MathType_name, int32(x)) +} + +func (AlgorithmProto_MathType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_768c61f2a579ee6a, []int{1, 0} +} + +// Generic tensor representation. +type TensorDescriptorProto struct { + Dimensions []int64 `protobuf:"varint,1,rep,packed,name=dimensions,proto3" json:"dimensions,omitempty"` + DataType DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=stream_executor.dnn.DataType" json:"data_type,omitempty"` + // Types that are valid to be assigned to LayoutOneof: + // *TensorDescriptorProto_DataLayout + // *TensorDescriptorProto_FilterLayout + LayoutOneof isTensorDescriptorProto_LayoutOneof `protobuf_oneof:"layout_oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorDescriptorProto) Reset() { *m = TensorDescriptorProto{} } +func (m *TensorDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*TensorDescriptorProto) ProtoMessage() {} +func (*TensorDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_768c61f2a579ee6a, []int{0} +} + +func (m *TensorDescriptorProto) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_TensorDescriptorProto.Unmarshal(m, b) +} +func (m *TensorDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorDescriptorProto.Marshal(b, m, deterministic) +} +func (m *TensorDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorDescriptorProto.Merge(m, src) +} +func (m *TensorDescriptorProto) XXX_Size() int { + return xxx_messageInfo_TensorDescriptorProto.Size(m) +} +func (m *TensorDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_TensorDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorDescriptorProto proto.InternalMessageInfo + +func (m *TensorDescriptorProto) GetDimensions() []int64 { + if m != nil { + return m.Dimensions + } + return nil +} + +func (m *TensorDescriptorProto) GetDataType() DataType { + if m != nil { + return m.DataType + } + return DataType_kFloat +} + +type isTensorDescriptorProto_LayoutOneof interface { + isTensorDescriptorProto_LayoutOneof() +} + +type TensorDescriptorProto_DataLayout struct { + DataLayout DataLayout `protobuf:"varint,3,opt,name=data_layout,json=dataLayout,proto3,enum=stream_executor.dnn.DataLayout,oneof"` +} + +type TensorDescriptorProto_FilterLayout struct { + FilterLayout FilterLayout `protobuf:"varint,4,opt,name=filter_layout,json=filterLayout,proto3,enum=stream_executor.dnn.FilterLayout,oneof"` +} + +func (*TensorDescriptorProto_DataLayout) isTensorDescriptorProto_LayoutOneof() {} + +func (*TensorDescriptorProto_FilterLayout) isTensorDescriptorProto_LayoutOneof() {} + +func (m *TensorDescriptorProto) GetLayoutOneof() isTensorDescriptorProto_LayoutOneof { + if m != nil { + return m.LayoutOneof + } + return nil +} + +func (m *TensorDescriptorProto) GetDataLayout() DataLayout { + if x, ok := m.GetLayoutOneof().(*TensorDescriptorProto_DataLayout); ok { + return x.DataLayout + } + return DataLayout_kYXDepthBatch +} + +func (m *TensorDescriptorProto) GetFilterLayout() FilterLayout { + if x, ok := 
m.GetLayoutOneof().(*TensorDescriptorProto_FilterLayout); ok { + return x.FilterLayout + } + return FilterLayout_kOutputInputYX +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*TensorDescriptorProto) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TensorDescriptorProto_DataLayout)(nil), + (*TensorDescriptorProto_FilterLayout)(nil), + } +} + +// Generic algorithm representation. +type AlgorithmProto struct { + AlgoId int64 `protobuf:"varint,1,opt,name=algo_id,json=algoId,proto3" json:"algo_id,omitempty"` + MathType AlgorithmProto_MathType `protobuf:"varint,2,opt,name=math_type,json=mathType,proto3,enum=stream_executor.dnn.AlgorithmProto_MathType" json:"math_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlgorithmProto) Reset() { *m = AlgorithmProto{} } +func (m *AlgorithmProto) String() string { return proto.CompactTextString(m) } +func (*AlgorithmProto) ProtoMessage() {} +func (*AlgorithmProto) Descriptor() ([]byte, []int) { + return fileDescriptor_768c61f2a579ee6a, []int{1} +} + +func (m *AlgorithmProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlgorithmProto.Unmarshal(m, b) +} +func (m *AlgorithmProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlgorithmProto.Marshal(b, m, deterministic) +} +func (m *AlgorithmProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlgorithmProto.Merge(m, src) +} +func (m *AlgorithmProto) XXX_Size() int { + return xxx_messageInfo_AlgorithmProto.Size(m) +} +func (m *AlgorithmProto) XXX_DiscardUnknown() { + xxx_messageInfo_AlgorithmProto.DiscardUnknown(m) +} + +var xxx_messageInfo_AlgorithmProto proto.InternalMessageInfo + +func (m *AlgorithmProto) GetAlgoId() int64 { + if m != nil { + return m.AlgoId + } + return 0 +} + +func (m *AlgorithmProto) GetMathType() AlgorithmProto_MathType { + if m != nil { + return m.MathType + } + 
return AlgorithmProto_DEFAULT_MATH +} + +// Convolution-specific parameters. +type ConvolutionDescriptorProto struct { + Paddings []int64 `protobuf:"varint,1,rep,packed,name=paddings,proto3" json:"paddings,omitempty"` + Strides []int64 `protobuf:"varint,2,rep,packed,name=strides,proto3" json:"strides,omitempty"` + Dilations []int64 `protobuf:"varint,3,rep,packed,name=dilations,proto3" json:"dilations,omitempty"` + // The "accumulator" type. For example, use F32 as an accumulator for F16 + // convolutions. + // See cuDNN's cudnnConvolutionMode_t. + ComputeMode DataType `protobuf:"varint,4,opt,name=compute_mode,json=computeMode,proto3,enum=stream_executor.dnn.DataType" json:"compute_mode,omitempty"` + // See cuDNN's group count. + GroupCount int32 `protobuf:"varint,5,opt,name=group_count,json=groupCount,proto3" json:"group_count,omitempty"` + ConvolutionMode ConvolutionMode `protobuf:"varint,6,opt,name=convolution_mode,json=convolutionMode,proto3,enum=stream_executor.dnn.ConvolutionMode" json:"convolution_mode,omitempty"` + // Tensorflow node name, same as in NodeDef, for debugging purposes. 
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConvolutionDescriptorProto) Reset() { *m = ConvolutionDescriptorProto{} } +func (m *ConvolutionDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ConvolutionDescriptorProto) ProtoMessage() {} +func (*ConvolutionDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_768c61f2a579ee6a, []int{2} +} + +func (m *ConvolutionDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConvolutionDescriptorProto.Unmarshal(m, b) +} +func (m *ConvolutionDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConvolutionDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ConvolutionDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConvolutionDescriptorProto.Merge(m, src) +} +func (m *ConvolutionDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ConvolutionDescriptorProto.Size(m) +} +func (m *ConvolutionDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ConvolutionDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ConvolutionDescriptorProto proto.InternalMessageInfo + +func (m *ConvolutionDescriptorProto) GetPaddings() []int64 { + if m != nil { + return m.Paddings + } + return nil +} + +func (m *ConvolutionDescriptorProto) GetStrides() []int64 { + if m != nil { + return m.Strides + } + return nil +} + +func (m *ConvolutionDescriptorProto) GetDilations() []int64 { + if m != nil { + return m.Dilations + } + return nil +} + +func (m *ConvolutionDescriptorProto) GetComputeMode() DataType { + if m != nil { + return m.ComputeMode + } + return DataType_kFloat +} + +func (m *ConvolutionDescriptorProto) GetGroupCount() int32 { + if m != nil { + return m.GroupCount + } + return 0 +} + +func (m *ConvolutionDescriptorProto) 
GetConvolutionMode() ConvolutionMode { + if m != nil { + return m.ConvolutionMode + } + return ConvolutionMode_CROSS_CORRELATION +} + +func (m *ConvolutionDescriptorProto) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterEnum("stream_executor.dnn.DataType", DataType_name, DataType_value) + proto.RegisterEnum("stream_executor.dnn.DataLayout", DataLayout_name, DataLayout_value) + proto.RegisterEnum("stream_executor.dnn.FilterLayout", FilterLayout_name, FilterLayout_value) + proto.RegisterEnum("stream_executor.dnn.ActivationMode", ActivationMode_name, ActivationMode_value) + proto.RegisterEnum("stream_executor.dnn.ConvolutionMode", ConvolutionMode_name, ConvolutionMode_value) + proto.RegisterEnum("stream_executor.dnn.ConvolutionKind", ConvolutionKind_name, ConvolutionKind_value) + proto.RegisterEnum("stream_executor.dnn.AlgorithmProto_MathType", AlgorithmProto_MathType_name, AlgorithmProto_MathType_value) + proto.RegisterType((*TensorDescriptorProto)(nil), "stream_executor.dnn.TensorDescriptorProto") + proto.RegisterType((*AlgorithmProto)(nil), "stream_executor.dnn.AlgorithmProto") + proto.RegisterType((*ConvolutionDescriptorProto)(nil), "stream_executor.dnn.ConvolutionDescriptorProto") +} + +func init() { + proto.RegisterFile("tensorflow/stream_executor/dnn.proto", fileDescriptor_768c61f2a579ee6a) +} + +var fileDescriptor_768c61f2a579ee6a = []byte{ + // 759 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0xdd, 0x8e, 0xda, 0x46, + 0x14, 0xc7, 0x31, 0xb0, 0x2c, 0x1c, 0x58, 0x76, 0x32, 0x51, 0x14, 0x2b, 0xfd, 0x08, 0x5d, 0xe5, + 0x02, 0xa1, 0x8a, 0x54, 0x49, 0x54, 0xb5, 0xbd, 0xaa, 0xc1, 0xa0, 0xb5, 0xc2, 0xe2, 0x95, 0x71, + 0xb6, 0xec, 0x95, 0x35, 0xf1, 0x0c, 0x60, 0x61, 0xcf, 0x58, 0xf6, 0x38, 0xed, 0x3e, 0x46, 0x5f, + 0xa2, 0xcf, 0xd0, 0xc7, 0xab, 0x3c, 0x36, 0x2c, 0xac, 0xa8, 0x72, 0x77, 0xce, 0x6f, 0xce, 0xc7, + 0xf8, 0xff, 0x1f, 0x19, 0xde, 0x48, 
0xc6, 0x53, 0x91, 0xac, 0x42, 0xf1, 0xe7, 0xdb, 0x54, 0x26, + 0x8c, 0x44, 0x1e, 0xfb, 0x8b, 0xf9, 0x99, 0x14, 0xc9, 0x5b, 0xca, 0xf9, 0x30, 0x4e, 0x84, 0x14, + 0xf8, 0xf9, 0x93, 0xa3, 0x21, 0xe5, 0xfc, 0xea, 0xef, 0x2a, 0xbc, 0x70, 0x55, 0xb7, 0xc9, 0x52, + 0x3f, 0x09, 0x62, 0x29, 0x92, 0x5b, 0x55, 0xfe, 0x3d, 0x00, 0x0d, 0x22, 0xc6, 0xd3, 0x40, 0xf0, + 0x54, 0xd7, 0x7a, 0xb5, 0x7e, 0xcd, 0x39, 0x20, 0xf8, 0x37, 0x68, 0x51, 0x22, 0x89, 0x27, 0x1f, + 0x62, 0xa6, 0x57, 0x7b, 0x5a, 0xbf, 0xfb, 0xee, 0xbb, 0xe1, 0x89, 0x15, 0x43, 0x93, 0x48, 0xe2, + 0x3e, 0xc4, 0xcc, 0x69, 0xd2, 0x32, 0xc2, 0x23, 0x68, 0xab, 0xde, 0x90, 0x3c, 0x88, 0x4c, 0xea, + 0x35, 0xd5, 0xfd, 0xfa, 0x7f, 0xbb, 0x67, 0xaa, 0xec, 0xba, 0xe2, 0x00, 0xdd, 0x67, 0xf8, 0x1a, + 0x2e, 0x56, 0x41, 0x28, 0x59, 0xb2, 0x9b, 0x52, 0x57, 0x53, 0x7e, 0x38, 0x39, 0x65, 0xaa, 0x2a, + 0xf7, 0x73, 0x3a, 0xab, 0x83, 0x7c, 0xd4, 0x85, 0x4e, 0x31, 0xc2, 0x13, 0x9c, 0x89, 0xd5, 0xd5, + 0x3f, 0x1a, 0x74, 0x8d, 0x70, 0x2d, 0x92, 0x40, 0x6e, 0xa2, 0x42, 0x8c, 0x97, 0x70, 0x4e, 0xc2, + 0xb5, 0xf0, 0x02, 0xaa, 0x6b, 0x3d, 0xad, 0x5f, 0x73, 0x1a, 0x79, 0x6a, 0x51, 0x6c, 0x41, 0x2b, + 0x22, 0x72, 0x73, 0xa8, 0xc2, 0x8f, 0x27, 0x6f, 0x70, 0x3c, 0x70, 0x78, 0x43, 0xe4, 0xa6, 0x10, + 0x25, 0x2a, 0xa3, 0xab, 0x9f, 0xa0, 0xb9, 0xa3, 0x18, 0x41, 0xc7, 0x9c, 0x4c, 0x8d, 0x4f, 0x33, + 0xd7, 0xbb, 0x31, 0xdc, 0x6b, 0x54, 0xc1, 0x18, 0xba, 0xee, 0x64, 0xbe, 0xb0, 0x1d, 0xcf, 0xbe, + 0x2d, 0x98, 0x76, 0xf5, 0x6f, 0x15, 0x5e, 0x8d, 0x05, 0xff, 0x22, 0xc2, 0x4c, 0x06, 0x82, 0x3f, + 0x75, 0xf0, 0x15, 0x34, 0x63, 0x42, 0x69, 0xc0, 0xd7, 0x3b, 0xff, 0xf6, 0x39, 0xd6, 0xe1, 0x3c, + 0x95, 0x49, 0x40, 0x59, 0xaa, 0x57, 0xd5, 0xd1, 0x2e, 0xc5, 0xdf, 0x42, 0x8b, 0x06, 0x21, 0x91, + 0xca, 0xf6, 0x9a, 0x3a, 0x7b, 0x04, 0xf8, 0x77, 0xe8, 0xf8, 0x22, 0x8a, 0x33, 0xc9, 0xbc, 0x48, + 0x50, 0x56, 0x8a, 0xfe, 0x15, 0xe3, 0xdb, 0x65, 0xcb, 0x8d, 0xa0, 0x0c, 0xbf, 0x86, 0xf6, 0x3a, + 0x11, 0x59, 0xec, 0xf9, 0x22, 0xe3, 0x52, 0x3f, 0xeb, 0x69, 0xfd, 0x33, 
0x07, 0x14, 0x1a, 0xe7, + 0x04, 0xdb, 0x80, 0xfc, 0xc7, 0x8f, 0x2a, 0xd6, 0x34, 0xd4, 0x9a, 0x37, 0x27, 0xd7, 0x1c, 0x28, + 0x90, 0x2f, 0x70, 0x2e, 0xfd, 0x63, 0x80, 0x31, 0xd4, 0x39, 0x89, 0x98, 0x7e, 0xde, 0xd3, 0xfa, + 0x2d, 0x47, 0xc5, 0x83, 0x09, 0x34, 0x77, 0xd7, 0xc3, 0x00, 0x8d, 0xed, 0x34, 0x14, 0x44, 0xa2, + 0x0a, 0x6e, 0xc3, 0xf9, 0xd6, 0x14, 0xd9, 0xe7, 0x90, 0x21, 0x0d, 0xb7, 0xe0, 0x6c, 0x7b, 0x4d, + 0xc2, 0x15, 0xaa, 0xaa, 0xd0, 0xe2, 0xf2, 0x17, 0x54, 0x53, 0xe5, 0x16, 0x97, 0xef, 0xdf, 0xa1, + 0xfa, 0x20, 0x04, 0x78, 0x7c, 0xa0, 0xf8, 0x19, 0x5c, 0x6c, 0xef, 0x97, 0x26, 0x8b, 0xe5, 0x66, + 0x44, 0xa4, 0xbf, 0x41, 0x95, 0x12, 0xa9, 0x4c, 0x71, 0xa4, 0x29, 0xa4, 0x40, 0x59, 0x8a, 0xaa, + 0x8f, 0x48, 0x81, 0xfb, 0x25, 0xaa, 0xe5, 0x7e, 0x1f, 0xa1, 0x0f, 0xa8, 0x3e, 0x48, 0xa1, 0x73, + 0xf8, 0x90, 0x55, 0x8d, 0x9d, 0xc9, 0x38, 0x93, 0x16, 0x8f, 0x33, 0x79, 0xbf, 0x2c, 0xde, 0x49, + 0xc9, 0xee, 0x97, 0x8a, 0x22, 0x0d, 0x3f, 0x87, 0xcb, 0xe3, 0xba, 0x0f, 0xa8, 0xaa, 0x0a, 0xcb, + 0xb4, 0x38, 0x2b, 0x97, 0x96, 0x6d, 0x25, 0xab, 0x0f, 0x56, 0xd0, 0x35, 0x7c, 0x19, 0x7c, 0x21, + 0x7b, 0x3d, 0x73, 0x2d, 0xe6, 0x82, 0x33, 0x54, 0xc1, 0x1d, 0x68, 0x6e, 0x17, 0xc1, 0x3a, 0x12, + 0x01, 0x2d, 0xf5, 0x72, 0x58, 0x98, 0xa1, 0xaa, 0x12, 0x29, 0x0f, 0x7f, 0x2e, 0x05, 0xcb, 0xe3, + 0x25, 0xaa, 0xab, 0x12, 0x97, 0xf0, 0x0d, 0x3a, 0xc3, 0x17, 0xd0, 0xda, 0x8e, 0x08, 0xa7, 0xb7, + 0x24, 0x4d, 0x51, 0x63, 0xf0, 0x2b, 0x5c, 0x3e, 0x71, 0x12, 0xbf, 0x80, 0x67, 0x63, 0xc7, 0x5e, + 0x2c, 0xbc, 0xb1, 0xed, 0x38, 0x93, 0x99, 0xe1, 0x5a, 0xf6, 0x1c, 0x55, 0xf0, 0x25, 0xb4, 0xc7, + 0xf6, 0xfc, 0xce, 0x9e, 0x7d, 0x52, 0x40, 0x1b, 0xc4, 0x47, 0xad, 0x1f, 0x03, 0x4e, 0x73, 0x1f, + 0xad, 0xf9, 0x9d, 0x31, 0xb3, 0xcc, 0xc2, 0xd4, 0xa9, 0xed, 0xfc, 0x61, 0x38, 0x66, 0x21, 0xc6, + 0xc8, 0x18, 0x7f, 0xcc, 0x33, 0x6f, 0x6a, 0xcd, 0xdc, 0x89, 0x53, 0x18, 0xb0, 0x87, 0xa6, 0xe1, + 0x1a, 0xa8, 0x86, 0xbf, 0x81, 0x97, 0x65, 0x93, 0x37, 0xb2, 0x8c, 0x85, 0x67, 0x8c, 0x5d, 0xeb, + 0xae, 0xb8, 
0x42, 0xfd, 0x73, 0x43, 0xfd, 0x52, 0xdf, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x77, + 0x13, 0xd3, 0x97, 0x7a, 0x05, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/eager_service.pb.go b/executor/proto/tensorflow/core/protobuf/eager_service.pb.go new file mode 100644 index 0000000000..760877a9c0 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/eager_service.pb.go @@ -0,0 +1,1107 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/eager_service.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type RemoteTensorHandle struct { + // The ID of the operation that produced this tensor. + OpId int64 `protobuf:"varint,1,opt,name=op_id,json=opId,proto3" json:"op_id,omitempty"` + // The index into the outputs of the operation that produced this tensor. + OutputNum int32 `protobuf:"varint,2,opt,name=output_num,json=outputNum,proto3" json:"output_num,omitempty"` + // Device of the operation that produced this tensor. Cannot be empty. + // For multi-device functions, it's the default device passed to placer. + Device string `protobuf:"bytes,3,opt,name=device,proto3" json:"device,omitempty"` + // Device where the tensor is located. Can be empty if the operation producing + // this tensor is a multi-device function. 
+ OpDevice string `protobuf:"bytes,4,opt,name=op_device,json=opDevice,proto3" json:"op_device,omitempty"` + // Tensor type. + Dtype framework.DataType `protobuf:"varint,5,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoteTensorHandle) Reset() { *m = RemoteTensorHandle{} } +func (m *RemoteTensorHandle) String() string { return proto.CompactTextString(m) } +func (*RemoteTensorHandle) ProtoMessage() {} +func (*RemoteTensorHandle) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{0} +} + +func (m *RemoteTensorHandle) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoteTensorHandle.Unmarshal(m, b) +} +func (m *RemoteTensorHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoteTensorHandle.Marshal(b, m, deterministic) +} +func (m *RemoteTensorHandle) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteTensorHandle.Merge(m, src) +} +func (m *RemoteTensorHandle) XXX_Size() int { + return xxx_messageInfo_RemoteTensorHandle.Size(m) +} +func (m *RemoteTensorHandle) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteTensorHandle.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteTensorHandle proto.InternalMessageInfo + +func (m *RemoteTensorHandle) GetOpId() int64 { + if m != nil { + return m.OpId + } + return 0 +} + +func (m *RemoteTensorHandle) GetOutputNum() int32 { + if m != nil { + return m.OutputNum + } + return 0 +} + +func (m *RemoteTensorHandle) GetDevice() string { + if m != nil { + return m.Device + } + return "" +} + +func (m *RemoteTensorHandle) GetOpDevice() string { + if m != nil { + return m.OpDevice + } + return "" +} + +func (m *RemoteTensorHandle) GetDtype() framework.DataType { + if m != nil { + return m.Dtype + } + return framework.DataType_DT_INVALID +} + +// A proto representation of an eager operation. 
+type Operation struct { + // A unique identifier for the operation. Set by the client so that the client + // can uniquely identify the outputs of the scheduled operation. + // + // In the initial implementation, sending duplicate IDs has undefined + // behaviour, but additional constraints may be placed upon this in the + // future. + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Inputs []*RemoteTensorHandle `protobuf:"bytes,3,rep,name=inputs,proto3" json:"inputs,omitempty"` + // Control Operation IDs that will be respected when ops are re-ordered by + // async execution. If async execution (+ op re-ordering) is not enabled, this + // should have no effect. + ControlOpIds []int64 `protobuf:"varint,4,rep,packed,name=control_op_ids,json=controlOpIds,proto3" json:"control_op_ids,omitempty"` + Attrs map[string]*framework.AttrValue `protobuf:"bytes,5,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Device string `protobuf:"bytes,6,opt,name=device,proto3" json:"device,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{1} +} + +func (m *Operation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Operation.Unmarshal(m, b) +} +func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Operation.Marshal(b, m, deterministic) +} +func (m *Operation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Operation.Merge(m, src) +} +func (m *Operation) XXX_Size() int { + return 
xxx_messageInfo_Operation.Size(m) +} +func (m *Operation) XXX_DiscardUnknown() { + xxx_messageInfo_Operation.DiscardUnknown(m) +} + +var xxx_messageInfo_Operation proto.InternalMessageInfo + +func (m *Operation) GetId() int64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Operation) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Operation) GetInputs() []*RemoteTensorHandle { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *Operation) GetControlOpIds() []int64 { + if m != nil { + return m.ControlOpIds + } + return nil +} + +func (m *Operation) GetAttrs() map[string]*framework.AttrValue { + if m != nil { + return m.Attrs + } + return nil +} + +func (m *Operation) GetDevice() string { + if m != nil { + return m.Device + } + return "" +} + +type QueueItem struct { + // The remote executor should be able to handle either executing ops directly, + // or releasing any unused tensor handles, since the tensor lifetime is + // maintained by the client. 
+ // + // Types that are valid to be assigned to Item: + // *QueueItem_HandleToDecref + // *QueueItem_Operation + // *QueueItem_SendTensor + Item isQueueItem_Item `protobuf_oneof:"item"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueItem) Reset() { *m = QueueItem{} } +func (m *QueueItem) String() string { return proto.CompactTextString(m) } +func (*QueueItem) ProtoMessage() {} +func (*QueueItem) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{2} +} + +func (m *QueueItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueItem.Unmarshal(m, b) +} +func (m *QueueItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueItem.Marshal(b, m, deterministic) +} +func (m *QueueItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueItem.Merge(m, src) +} +func (m *QueueItem) XXX_Size() int { + return xxx_messageInfo_QueueItem.Size(m) +} +func (m *QueueItem) XXX_DiscardUnknown() { + xxx_messageInfo_QueueItem.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueItem proto.InternalMessageInfo + +type isQueueItem_Item interface { + isQueueItem_Item() +} + +type QueueItem_HandleToDecref struct { + HandleToDecref *RemoteTensorHandle `protobuf:"bytes,1,opt,name=handle_to_decref,json=handleToDecref,proto3,oneof"` +} + +type QueueItem_Operation struct { + Operation *Operation `protobuf:"bytes,2,opt,name=operation,proto3,oneof"` +} + +type QueueItem_SendTensor struct { + SendTensor *SendTensorOp `protobuf:"bytes,3,opt,name=send_tensor,json=sendTensor,proto3,oneof"` +} + +func (*QueueItem_HandleToDecref) isQueueItem_Item() {} + +func (*QueueItem_Operation) isQueueItem_Item() {} + +func (*QueueItem_SendTensor) isQueueItem_Item() {} + +func (m *QueueItem) GetItem() isQueueItem_Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *QueueItem) GetHandleToDecref() *RemoteTensorHandle { + if x, ok := 
m.GetItem().(*QueueItem_HandleToDecref); ok { + return x.HandleToDecref + } + return nil +} + +func (m *QueueItem) GetOperation() *Operation { + if x, ok := m.GetItem().(*QueueItem_Operation); ok { + return x.Operation + } + return nil +} + +func (m *QueueItem) GetSendTensor() *SendTensorOp { + if x, ok := m.GetItem().(*QueueItem_SendTensor); ok { + return x.SendTensor + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*QueueItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*QueueItem_HandleToDecref)(nil), + (*QueueItem_Operation)(nil), + (*QueueItem_SendTensor)(nil), + } +} + +type QueueResponse struct { + Shape []*framework.TensorShapeProto `protobuf:"bytes,1,rep,name=shape,proto3" json:"shape,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueResponse) Reset() { *m = QueueResponse{} } +func (m *QueueResponse) String() string { return proto.CompactTextString(m) } +func (*QueueResponse) ProtoMessage() {} +func (*QueueResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{3} +} + +func (m *QueueResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueResponse.Unmarshal(m, b) +} +func (m *QueueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueResponse.Marshal(b, m, deterministic) +} +func (m *QueueResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueResponse.Merge(m, src) +} +func (m *QueueResponse) XXX_Size() int { + return xxx_messageInfo_QueueResponse.Size(m) +} +func (m *QueueResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueueResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueResponse proto.InternalMessageInfo + +func (m *QueueResponse) GetShape() []*framework.TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +type CreateContextRequest struct { + // Identifies 
the full cluster, and this particular worker's position within. + ServerDef *ServerDef `protobuf:"bytes,1,opt,name=server_def,json=serverDef,proto3" json:"server_def,omitempty"` + // Whether the ops on the worker should be executed synchronously or + // asynchronously. By default, ops are executed synchronously. + Async bool `protobuf:"varint,2,opt,name=async,proto3" json:"async,omitempty"` + // Number of seconds to keep the context alive. If more than keep_alive_secs + // has passed since a particular context has been communicated with, it will + // be garbage collected. + KeepAliveSecs int64 `protobuf:"varint,3,opt,name=keep_alive_secs,json=keepAliveSecs,proto3" json:"keep_alive_secs,omitempty"` + // This is the version for all the ops that will be enqueued by the client. + VersionDef *framework.VersionDef `protobuf:"bytes,4,opt,name=version_def,json=versionDef,proto3" json:"version_def,omitempty"` + // Device attributes in the cluster + ClusterDeviceAttributes []*framework.DeviceAttributes `protobuf:"bytes,6,rep,name=cluster_device_attributes,json=clusterDeviceAttributes,proto3" json:"cluster_device_attributes,omitempty"` + // The ID of the created context. This is usually a randomly generated number, + // that will be used to identify the context in future requests to the + // service. Contexts are not persisted through server restarts. + // This ID will be used for all future communications as well. It is essential + // that both ends use this ID for selecting a rendezvous to get everything to + // match. 
+ ContextId uint64 `protobuf:"fixed64,7,opt,name=context_id,json=contextId,proto3" json:"context_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateContextRequest) Reset() { *m = CreateContextRequest{} } +func (m *CreateContextRequest) String() string { return proto.CompactTextString(m) } +func (*CreateContextRequest) ProtoMessage() {} +func (*CreateContextRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{4} +} + +func (m *CreateContextRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateContextRequest.Unmarshal(m, b) +} +func (m *CreateContextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateContextRequest.Marshal(b, m, deterministic) +} +func (m *CreateContextRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateContextRequest.Merge(m, src) +} +func (m *CreateContextRequest) XXX_Size() int { + return xxx_messageInfo_CreateContextRequest.Size(m) +} +func (m *CreateContextRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateContextRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateContextRequest proto.InternalMessageInfo + +func (m *CreateContextRequest) GetServerDef() *ServerDef { + if m != nil { + return m.ServerDef + } + return nil +} + +func (m *CreateContextRequest) GetAsync() bool { + if m != nil { + return m.Async + } + return false +} + +func (m *CreateContextRequest) GetKeepAliveSecs() int64 { + if m != nil { + return m.KeepAliveSecs + } + return 0 +} + +func (m *CreateContextRequest) GetVersionDef() *framework.VersionDef { + if m != nil { + return m.VersionDef + } + return nil +} + +func (m *CreateContextRequest) GetClusterDeviceAttributes() []*framework.DeviceAttributes { + if m != nil { + return m.ClusterDeviceAttributes + } + return nil +} + +func (m *CreateContextRequest) GetContextId() uint64 { + if m != nil { + return 
m.ContextId + } + return 0 +} + +type CreateContextResponse struct { + // List of devices that are locally accessible to the worker. + DeviceAttributes []*framework.DeviceAttributes `protobuf:"bytes,2,rep,name=device_attributes,json=deviceAttributes,proto3" json:"device_attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateContextResponse) Reset() { *m = CreateContextResponse{} } +func (m *CreateContextResponse) String() string { return proto.CompactTextString(m) } +func (*CreateContextResponse) ProtoMessage() {} +func (*CreateContextResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{5} +} + +func (m *CreateContextResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateContextResponse.Unmarshal(m, b) +} +func (m *CreateContextResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateContextResponse.Marshal(b, m, deterministic) +} +func (m *CreateContextResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateContextResponse.Merge(m, src) +} +func (m *CreateContextResponse) XXX_Size() int { + return xxx_messageInfo_CreateContextResponse.Size(m) +} +func (m *CreateContextResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateContextResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateContextResponse proto.InternalMessageInfo + +func (m *CreateContextResponse) GetDeviceAttributes() []*framework.DeviceAttributes { + if m != nil { + return m.DeviceAttributes + } + return nil +} + +type EnqueueRequest struct { + ContextId uint64 `protobuf:"fixed64,1,opt,name=context_id,json=contextId,proto3" json:"context_id,omitempty"` + Queue []*QueueItem `protobuf:"bytes,3,rep,name=queue,proto3" json:"queue,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnqueueRequest) Reset() { 
*m = EnqueueRequest{} } +func (m *EnqueueRequest) String() string { return proto.CompactTextString(m) } +func (*EnqueueRequest) ProtoMessage() {} +func (*EnqueueRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{6} +} + +func (m *EnqueueRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnqueueRequest.Unmarshal(m, b) +} +func (m *EnqueueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnqueueRequest.Marshal(b, m, deterministic) +} +func (m *EnqueueRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnqueueRequest.Merge(m, src) +} +func (m *EnqueueRequest) XXX_Size() int { + return xxx_messageInfo_EnqueueRequest.Size(m) +} +func (m *EnqueueRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EnqueueRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EnqueueRequest proto.InternalMessageInfo + +func (m *EnqueueRequest) GetContextId() uint64 { + if m != nil { + return m.ContextId + } + return 0 +} + +func (m *EnqueueRequest) GetQueue() []*QueueItem { + if m != nil { + return m.Queue + } + return nil +} + +type EnqueueResponse struct { + // A single operation response for every item in the request. 
+ QueueResponse []*QueueResponse `protobuf:"bytes,1,rep,name=queue_response,json=queueResponse,proto3" json:"queue_response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnqueueResponse) Reset() { *m = EnqueueResponse{} } +func (m *EnqueueResponse) String() string { return proto.CompactTextString(m) } +func (*EnqueueResponse) ProtoMessage() {} +func (*EnqueueResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{7} +} + +func (m *EnqueueResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnqueueResponse.Unmarshal(m, b) +} +func (m *EnqueueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnqueueResponse.Marshal(b, m, deterministic) +} +func (m *EnqueueResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnqueueResponse.Merge(m, src) +} +func (m *EnqueueResponse) XXX_Size() int { + return xxx_messageInfo_EnqueueResponse.Size(m) +} +func (m *EnqueueResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EnqueueResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EnqueueResponse proto.InternalMessageInfo + +func (m *EnqueueResponse) GetQueueResponse() []*QueueResponse { + if m != nil { + return m.QueueResponse + } + return nil +} + +type WaitQueueDoneRequest struct { + ContextId uint64 `protobuf:"fixed64,1,opt,name=context_id,json=contextId,proto3" json:"context_id,omitempty"` + // Ids to wait on. If empty, wait on everything currently pending. 
+ OpId []int64 `protobuf:"varint,2,rep,packed,name=op_id,json=opId,proto3" json:"op_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WaitQueueDoneRequest) Reset() { *m = WaitQueueDoneRequest{} } +func (m *WaitQueueDoneRequest) String() string { return proto.CompactTextString(m) } +func (*WaitQueueDoneRequest) ProtoMessage() {} +func (*WaitQueueDoneRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{8} +} + +func (m *WaitQueueDoneRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WaitQueueDoneRequest.Unmarshal(m, b) +} +func (m *WaitQueueDoneRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WaitQueueDoneRequest.Marshal(b, m, deterministic) +} +func (m *WaitQueueDoneRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WaitQueueDoneRequest.Merge(m, src) +} +func (m *WaitQueueDoneRequest) XXX_Size() int { + return xxx_messageInfo_WaitQueueDoneRequest.Size(m) +} +func (m *WaitQueueDoneRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WaitQueueDoneRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WaitQueueDoneRequest proto.InternalMessageInfo + +func (m *WaitQueueDoneRequest) GetContextId() uint64 { + if m != nil { + return m.ContextId + } + return 0 +} + +func (m *WaitQueueDoneRequest) GetOpId() []int64 { + if m != nil { + return m.OpId + } + return nil +} + +type WaitQueueDoneResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WaitQueueDoneResponse) Reset() { *m = WaitQueueDoneResponse{} } +func (m *WaitQueueDoneResponse) String() string { return proto.CompactTextString(m) } +func (*WaitQueueDoneResponse) ProtoMessage() {} +func (*WaitQueueDoneResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{9} +} + +func (m *WaitQueueDoneResponse) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WaitQueueDoneResponse.Unmarshal(m, b) +} +func (m *WaitQueueDoneResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WaitQueueDoneResponse.Marshal(b, m, deterministic) +} +func (m *WaitQueueDoneResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WaitQueueDoneResponse.Merge(m, src) +} +func (m *WaitQueueDoneResponse) XXX_Size() int { + return xxx_messageInfo_WaitQueueDoneResponse.Size(m) +} +func (m *WaitQueueDoneResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WaitQueueDoneResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WaitQueueDoneResponse proto.InternalMessageInfo + +type KeepAliveRequest struct { + ContextId uint64 `protobuf:"fixed64,1,opt,name=context_id,json=contextId,proto3" json:"context_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeepAliveRequest) Reset() { *m = KeepAliveRequest{} } +func (m *KeepAliveRequest) String() string { return proto.CompactTextString(m) } +func (*KeepAliveRequest) ProtoMessage() {} +func (*KeepAliveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{10} +} + +func (m *KeepAliveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeepAliveRequest.Unmarshal(m, b) +} +func (m *KeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeepAliveRequest.Marshal(b, m, deterministic) +} +func (m *KeepAliveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeepAliveRequest.Merge(m, src) +} +func (m *KeepAliveRequest) XXX_Size() int { + return xxx_messageInfo_KeepAliveRequest.Size(m) +} +func (m *KeepAliveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_KeepAliveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_KeepAliveRequest proto.InternalMessageInfo + +func (m *KeepAliveRequest) GetContextId() uint64 { + if m != nil { + return 
m.ContextId + } + return 0 +} + +type KeepAliveResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeepAliveResponse) Reset() { *m = KeepAliveResponse{} } +func (m *KeepAliveResponse) String() string { return proto.CompactTextString(m) } +func (*KeepAliveResponse) ProtoMessage() {} +func (*KeepAliveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{11} +} + +func (m *KeepAliveResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeepAliveResponse.Unmarshal(m, b) +} +func (m *KeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeepAliveResponse.Marshal(b, m, deterministic) +} +func (m *KeepAliveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeepAliveResponse.Merge(m, src) +} +func (m *KeepAliveResponse) XXX_Size() int { + return xxx_messageInfo_KeepAliveResponse.Size(m) +} +func (m *KeepAliveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_KeepAliveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_KeepAliveResponse proto.InternalMessageInfo + +type CloseContextRequest struct { + ContextId uint64 `protobuf:"fixed64,1,opt,name=context_id,json=contextId,proto3" json:"context_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseContextRequest) Reset() { *m = CloseContextRequest{} } +func (m *CloseContextRequest) String() string { return proto.CompactTextString(m) } +func (*CloseContextRequest) ProtoMessage() {} +func (*CloseContextRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{12} +} + +func (m *CloseContextRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseContextRequest.Unmarshal(m, b) +} +func (m *CloseContextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_CloseContextRequest.Marshal(b, m, deterministic) +} +func (m *CloseContextRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseContextRequest.Merge(m, src) +} +func (m *CloseContextRequest) XXX_Size() int { + return xxx_messageInfo_CloseContextRequest.Size(m) +} +func (m *CloseContextRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CloseContextRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseContextRequest proto.InternalMessageInfo + +func (m *CloseContextRequest) GetContextId() uint64 { + if m != nil { + return m.ContextId + } + return 0 +} + +type CloseContextResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseContextResponse) Reset() { *m = CloseContextResponse{} } +func (m *CloseContextResponse) String() string { return proto.CompactTextString(m) } +func (*CloseContextResponse) ProtoMessage() {} +func (*CloseContextResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{13} +} + +func (m *CloseContextResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseContextResponse.Unmarshal(m, b) +} +func (m *CloseContextResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseContextResponse.Marshal(b, m, deterministic) +} +func (m *CloseContextResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseContextResponse.Merge(m, src) +} +func (m *CloseContextResponse) XXX_Size() int { + return xxx_messageInfo_CloseContextResponse.Size(m) +} +func (m *CloseContextResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CloseContextResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseContextResponse proto.InternalMessageInfo + +type RegisterFunctionRequest struct { + ContextId uint64 `protobuf:"fixed64,1,opt,name=context_id,json=contextId,proto3" json:"context_id,omitempty"` + FunctionDef *framework.FunctionDef 
`protobuf:"bytes,2,opt,name=function_def,json=functionDef,proto3" json:"function_def,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegisterFunctionRequest) Reset() { *m = RegisterFunctionRequest{} } +func (m *RegisterFunctionRequest) String() string { return proto.CompactTextString(m) } +func (*RegisterFunctionRequest) ProtoMessage() {} +func (*RegisterFunctionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{14} +} + +func (m *RegisterFunctionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RegisterFunctionRequest.Unmarshal(m, b) +} +func (m *RegisterFunctionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RegisterFunctionRequest.Marshal(b, m, deterministic) +} +func (m *RegisterFunctionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterFunctionRequest.Merge(m, src) +} +func (m *RegisterFunctionRequest) XXX_Size() int { + return xxx_messageInfo_RegisterFunctionRequest.Size(m) +} +func (m *RegisterFunctionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterFunctionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RegisterFunctionRequest proto.InternalMessageInfo + +func (m *RegisterFunctionRequest) GetContextId() uint64 { + if m != nil { + return m.ContextId + } + return 0 +} + +func (m *RegisterFunctionRequest) GetFunctionDef() *framework.FunctionDef { + if m != nil { + return m.FunctionDef + } + return nil +} + +type RegisterFunctionResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegisterFunctionResponse) Reset() { *m = RegisterFunctionResponse{} } +func (m *RegisterFunctionResponse) String() string { return proto.CompactTextString(m) } +func (*RegisterFunctionResponse) ProtoMessage() {} +func (*RegisterFunctionResponse) Descriptor() ([]byte, []int) { + 
return fileDescriptor_7f63cfa0a7bc4510, []int{15} +} + +func (m *RegisterFunctionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RegisterFunctionResponse.Unmarshal(m, b) +} +func (m *RegisterFunctionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RegisterFunctionResponse.Marshal(b, m, deterministic) +} +func (m *RegisterFunctionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterFunctionResponse.Merge(m, src) +} +func (m *RegisterFunctionResponse) XXX_Size() int { + return xxx_messageInfo_RegisterFunctionResponse.Size(m) +} +func (m *RegisterFunctionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterFunctionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RegisterFunctionResponse proto.InternalMessageInfo + +type SendTensorOp struct { + // All remote tensors are identified by . To mimic this + // situation when directly sending tensors, we include an "artificial" op ID + // (which would have corresponded to the _Recv op when not using SendTensor). + OpId int64 `protobuf:"varint,1,opt,name=op_id,json=opId,proto3" json:"op_id,omitempty"` + // The index within the repeated field is the output number that will help + // uniquely identify (along with the above op_id) the particular tensor. + Tensors []*framework.TensorProto `protobuf:"bytes,2,rep,name=tensors,proto3" json:"tensors,omitempty"` + // The device on which the tensors should be resident. 
+ DeviceName string `protobuf:"bytes,3,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendTensorOp) Reset() { *m = SendTensorOp{} } +func (m *SendTensorOp) String() string { return proto.CompactTextString(m) } +func (*SendTensorOp) ProtoMessage() {} +func (*SendTensorOp) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{16} +} + +func (m *SendTensorOp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendTensorOp.Unmarshal(m, b) +} +func (m *SendTensorOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendTensorOp.Marshal(b, m, deterministic) +} +func (m *SendTensorOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendTensorOp.Merge(m, src) +} +func (m *SendTensorOp) XXX_Size() int { + return xxx_messageInfo_SendTensorOp.Size(m) +} +func (m *SendTensorOp) XXX_DiscardUnknown() { + xxx_messageInfo_SendTensorOp.DiscardUnknown(m) +} + +var xxx_messageInfo_SendTensorOp proto.InternalMessageInfo + +func (m *SendTensorOp) GetOpId() int64 { + if m != nil { + return m.OpId + } + return 0 +} + +func (m *SendTensorOp) GetTensors() []*framework.TensorProto { + if m != nil { + return m.Tensors + } + return nil +} + +func (m *SendTensorOp) GetDeviceName() string { + if m != nil { + return m.DeviceName + } + return "" +} + +type SendTensorRequest struct { + ContextId uint64 `protobuf:"fixed64,1,opt,name=context_id,json=contextId,proto3" json:"context_id,omitempty"` + // All remote tensors are identified by . To mimic this + // situation when directly sending tensors, we include an "artificial" op ID + // (which would have corresponded to the _Recv op when not using SendTensor). 
+ OpId int64 `protobuf:"varint,2,opt,name=op_id,json=opId,proto3" json:"op_id,omitempty"` + // The index within the repeated field is the output number that will help + // uniquely identify (along with the above op_id) the particular tensor. + Tensors []*framework.TensorProto `protobuf:"bytes,3,rep,name=tensors,proto3" json:"tensors,omitempty"` + // The device on which the tensors should be resident. + DeviceName string `protobuf:"bytes,4,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendTensorRequest) Reset() { *m = SendTensorRequest{} } +func (m *SendTensorRequest) String() string { return proto.CompactTextString(m) } +func (*SendTensorRequest) ProtoMessage() {} +func (*SendTensorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{17} +} + +func (m *SendTensorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendTensorRequest.Unmarshal(m, b) +} +func (m *SendTensorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendTensorRequest.Marshal(b, m, deterministic) +} +func (m *SendTensorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendTensorRequest.Merge(m, src) +} +func (m *SendTensorRequest) XXX_Size() int { + return xxx_messageInfo_SendTensorRequest.Size(m) +} +func (m *SendTensorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SendTensorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SendTensorRequest proto.InternalMessageInfo + +func (m *SendTensorRequest) GetContextId() uint64 { + if m != nil { + return m.ContextId + } + return 0 +} + +func (m *SendTensorRequest) GetOpId() int64 { + if m != nil { + return m.OpId + } + return 0 +} + +func (m *SendTensorRequest) GetTensors() []*framework.TensorProto { + if m != nil { + return m.Tensors + } + return nil +} + +func (m *SendTensorRequest) 
GetDeviceName() string { + if m != nil { + return m.DeviceName + } + return "" +} + +type SendTensorResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendTensorResponse) Reset() { *m = SendTensorResponse{} } +func (m *SendTensorResponse) String() string { return proto.CompactTextString(m) } +func (*SendTensorResponse) ProtoMessage() {} +func (*SendTensorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7f63cfa0a7bc4510, []int{18} +} + +func (m *SendTensorResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendTensorResponse.Unmarshal(m, b) +} +func (m *SendTensorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendTensorResponse.Marshal(b, m, deterministic) +} +func (m *SendTensorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendTensorResponse.Merge(m, src) +} +func (m *SendTensorResponse) XXX_Size() int { + return xxx_messageInfo_SendTensorResponse.Size(m) +} +func (m *SendTensorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SendTensorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SendTensorResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*RemoteTensorHandle)(nil), "tensorflow.eager.RemoteTensorHandle") + proto.RegisterType((*Operation)(nil), "tensorflow.eager.Operation") + proto.RegisterMapType((map[string]*framework.AttrValue)(nil), "tensorflow.eager.Operation.AttrsEntry") + proto.RegisterType((*QueueItem)(nil), "tensorflow.eager.QueueItem") + proto.RegisterType((*QueueResponse)(nil), "tensorflow.eager.QueueResponse") + proto.RegisterType((*CreateContextRequest)(nil), "tensorflow.eager.CreateContextRequest") + proto.RegisterType((*CreateContextResponse)(nil), "tensorflow.eager.CreateContextResponse") + proto.RegisterType((*EnqueueRequest)(nil), "tensorflow.eager.EnqueueRequest") + proto.RegisterType((*EnqueueResponse)(nil), 
"tensorflow.eager.EnqueueResponse") + proto.RegisterType((*WaitQueueDoneRequest)(nil), "tensorflow.eager.WaitQueueDoneRequest") + proto.RegisterType((*WaitQueueDoneResponse)(nil), "tensorflow.eager.WaitQueueDoneResponse") + proto.RegisterType((*KeepAliveRequest)(nil), "tensorflow.eager.KeepAliveRequest") + proto.RegisterType((*KeepAliveResponse)(nil), "tensorflow.eager.KeepAliveResponse") + proto.RegisterType((*CloseContextRequest)(nil), "tensorflow.eager.CloseContextRequest") + proto.RegisterType((*CloseContextResponse)(nil), "tensorflow.eager.CloseContextResponse") + proto.RegisterType((*RegisterFunctionRequest)(nil), "tensorflow.eager.RegisterFunctionRequest") + proto.RegisterType((*RegisterFunctionResponse)(nil), "tensorflow.eager.RegisterFunctionResponse") + proto.RegisterType((*SendTensorOp)(nil), "tensorflow.eager.SendTensorOp") + proto.RegisterType((*SendTensorRequest)(nil), "tensorflow.eager.SendTensorRequest") + proto.RegisterType((*SendTensorResponse)(nil), "tensorflow.eager.SendTensorResponse") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/eager_service.proto", fileDescriptor_7f63cfa0a7bc4510) +} + +var fileDescriptor_7f63cfa0a7bc4510 = []byte{ + // 1133 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xef, 0x72, 0xdb, 0x44, + 0x10, 0xaf, 0x6c, 0xcb, 0x8d, 0xd7, 0x89, 0xeb, 0x5e, 0x9c, 0xc4, 0xb8, 0x40, 0x8d, 0x9a, 0x06, + 0x13, 0x3a, 0x4e, 0x63, 0x3a, 0x03, 0x53, 0xfa, 0x25, 0x24, 0xe9, 0x24, 0x61, 0x26, 0x29, 0xe7, + 0x4c, 0x81, 0x01, 0x46, 0x28, 0xd6, 0x3a, 0xd1, 0xc4, 0xd6, 0xa9, 0xd2, 0x29, 0x21, 0x6f, 0xc0, + 0x3b, 0xf0, 0x04, 0xbc, 0x02, 0x1f, 0x79, 0x17, 0x1e, 0x83, 0x19, 0x46, 0x77, 0x67, 0x5b, 0x96, + 0x14, 0xc7, 0x30, 0x7c, 0x93, 0x76, 0x7f, 0xfb, 0xf7, 0xf6, 0xb7, 0x77, 0xf0, 0x8c, 0xa3, 0x1b, + 0x30, 0xbf, 0x3f, 0x60, 0xd7, 0x5b, 0x3d, 0xe6, 0xe3, 0x96, 0xe7, 0x33, 0xce, 0xce, 0xc2, 0xfe, + 0x16, 0x5a, 0xe7, 0xe8, 0x9b, 0x01, 0xfa, 0x57, 0x4e, 0x0f, 0xdb, 
0x42, 0x4c, 0xaa, 0x13, 0x74, + 0x5b, 0xe8, 0x1b, 0x9b, 0x49, 0xfb, 0xbe, 0x6f, 0x0d, 0xf1, 0x9a, 0xf9, 0x97, 0x5b, 0x16, 0xe7, + 0xbe, 0x79, 0x65, 0x0d, 0x42, 0x65, 0xdd, 0xd8, 0xbe, 0x1d, 0x6b, 0x63, 0x14, 0xc5, 0x8c, 0x4c, + 0x9c, 0xb3, 0x90, 0x63, 0xa0, 0x4c, 0x5a, 0xb7, 0x9b, 0xf4, 0x43, 0xb7, 0xc7, 0x1d, 0xe6, 0x2a, + 0xe4, 0xc6, 0xed, 0x48, 0xa9, 0x51, 0xb8, 0x67, 0x77, 0xe1, 0xcc, 0xe0, 0xc2, 0xf2, 0x46, 0x29, + 0x3f, 0x9d, 0x81, 0xbe, 0xf1, 0xe6, 0x49, 0xf3, 0x0a, 0xfd, 0xc0, 0x61, 0xee, 0x08, 0xf9, 0xfc, + 0xd6, 0x7e, 0x4f, 0x14, 0xa2, 0xe9, 0xa8, 0x12, 0x36, 0x7e, 0xd7, 0x80, 0x50, 0x1c, 0x32, 0x8e, + 0xa7, 0x02, 0x71, 0x60, 0xb9, 0xf6, 0x00, 0xc9, 0x32, 0xe8, 0xcc, 0x33, 0x1d, 0xbb, 0xae, 0x35, + 0xb5, 0x56, 0x9e, 0x16, 0x98, 0x77, 0x68, 0x93, 0x0f, 0x00, 0x58, 0xc8, 0xbd, 0x90, 0x9b, 0x6e, + 0x38, 0xac, 0xe7, 0x9a, 0x5a, 0x4b, 0xa7, 0x25, 0x29, 0x39, 0x0e, 0x87, 0x64, 0x15, 0x8a, 0xb2, + 0xd1, 0xf5, 0x7c, 0x53, 0x6b, 0x95, 0xa8, 0xfa, 0x23, 0x8f, 0xa0, 0xc4, 0x3c, 0x53, 0xa9, 0x0a, + 0x42, 0xb5, 0xc0, 0xbc, 0x3d, 0xa9, 0xdc, 0x04, 0xdd, 0x8e, 0x6a, 0xad, 0xeb, 0x4d, 0xad, 0x55, + 0xe9, 0xd4, 0xda, 0xb1, 0x19, 0xd8, 0xb3, 0xb8, 0x75, 0x7a, 0xe3, 0x21, 0x95, 0x10, 0xe3, 0x8f, + 0x1c, 0x94, 0x4e, 0x3c, 0xf4, 0xad, 0xe8, 0x60, 0x48, 0x05, 0x72, 0xe3, 0xfc, 0x72, 0x8e, 0x4d, + 0x08, 0x14, 0x5c, 0x6b, 0x88, 0x22, 0xaf, 0x12, 0x15, 0xdf, 0xe4, 0x15, 0x14, 0x1d, 0xd7, 0x0b, + 0x79, 0x50, 0xcf, 0x37, 0xf3, 0xad, 0x72, 0x67, 0xbd, 0x9d, 0x1c, 0xb1, 0x76, 0xba, 0x78, 0xaa, + 0x6c, 0xc8, 0x3a, 0x54, 0x7a, 0xcc, 0xe5, 0x3e, 0x1b, 0x98, 0xa2, 0x19, 0x41, 0xbd, 0xd0, 0xcc, + 0xb7, 0xf2, 0x74, 0x51, 0x49, 0x4f, 0xbc, 0x43, 0x3b, 0x20, 0xaf, 0x40, 0x8f, 0x06, 0x2b, 0xa8, + 0xeb, 0x22, 0xc4, 0x46, 0x3a, 0xc4, 0x38, 0xe7, 0xf6, 0x4e, 0x04, 0xdc, 0x77, 0xb9, 0x7f, 0x43, + 0xa5, 0x51, 0xac, 0x69, 0xc5, 0x78, 0xd3, 0x1a, 0x27, 0x00, 0x13, 0x30, 0xa9, 0x42, 0xfe, 0x12, + 0x6f, 0x44, 0xb1, 0x25, 0x1a, 0x7d, 0x92, 0x4f, 0x41, 0x17, 0xc3, 0x2f, 0xca, 0x2d, 0x77, 0x56, + 0xe2, 
0x51, 0x23, 0xc3, 0xb7, 0x91, 0x92, 0x4a, 0xcc, 0xcb, 0xdc, 0x17, 0x9a, 0xf1, 0x97, 0x06, + 0xa5, 0x6f, 0x42, 0x0c, 0xf1, 0x90, 0xe3, 0x90, 0xbc, 0x81, 0xea, 0x85, 0x28, 0xd6, 0xe4, 0xcc, + 0xb4, 0xb1, 0xe7, 0x63, 0x5f, 0x78, 0x9f, 0xb3, 0x45, 0x07, 0xf7, 0x68, 0x45, 0xda, 0x9f, 0xb2, + 0x3d, 0x61, 0x4d, 0xbe, 0x8c, 0x4e, 0x59, 0xd5, 0xa9, 0x92, 0x7a, 0x34, 0xa3, 0x15, 0x07, 0xf7, + 0xe8, 0x04, 0x4f, 0x76, 0xa0, 0x1c, 0xa0, 0x6b, 0x9b, 0x12, 0x2f, 0xe6, 0xa7, 0xdc, 0xf9, 0x30, + 0x6d, 0xde, 0x45, 0xd7, 0x96, 0x79, 0x9c, 0x78, 0x07, 0xf7, 0x28, 0x04, 0xe3, 0xff, 0xaf, 0x8a, + 0x50, 0x70, 0x38, 0x0e, 0x8d, 0x5d, 0x58, 0x12, 0x65, 0x52, 0x0c, 0x3c, 0xe6, 0x06, 0x48, 0x3a, + 0xa0, 0x0b, 0xce, 0xd5, 0x35, 0x71, 0x3e, 0xef, 0xc7, 0xbd, 0x4a, 0xdb, 0x6e, 0xa4, 0x7e, 0x13, + 0xd1, 0x81, 0x4a, 0xa8, 0xf1, 0x67, 0x0e, 0x6a, 0xbb, 0x3e, 0x5a, 0x1c, 0x77, 0x99, 0xcb, 0xf1, + 0x17, 0x4e, 0xf1, 0x5d, 0x88, 0x01, 0x27, 0x2f, 0x00, 0x24, 0x7d, 0x4c, 0x7b, 0xdc, 0xb1, 0xa9, + 0xde, 0x77, 0x85, 0x76, 0x0f, 0xfb, 0xb4, 0x14, 0x8c, 0x3e, 0x49, 0x0d, 0x74, 0x2b, 0xb8, 0x71, + 0x7b, 0xa2, 0x2f, 0x0b, 0x54, 0xfe, 0x90, 0x0d, 0x78, 0x70, 0x89, 0xe8, 0x99, 0xd6, 0xc0, 0xb9, + 0x42, 0x33, 0xc0, 0x5e, 0x20, 0x0a, 0xcf, 0xd3, 0xa5, 0x48, 0xbc, 0x13, 0x49, 0xbb, 0xd8, 0x0b, + 0xc8, 0xe7, 0x50, 0x56, 0x34, 0x17, 0x41, 0x0b, 0x22, 0xe8, 0x6a, 0x3c, 0xe8, 0x5b, 0xa9, 0x8e, + 0xa2, 0xc2, 0xd5, 0xf8, 0x9b, 0x7c, 0x07, 0xef, 0xf5, 0x06, 0x61, 0xc0, 0x45, 0xb6, 0x89, 0x0d, + 0x58, 0x2f, 0xa6, 0xbb, 0x21, 0x29, 0xb9, 0x33, 0xc6, 0xd0, 0x35, 0x65, 0x9e, 0x54, 0x44, 0x9b, + 0xa0, 0x27, 0x1b, 0x13, 0xed, 0x88, 0xfb, 0x4d, 0xad, 0x55, 0xa4, 0x25, 0x25, 0x39, 0xb4, 0x8f, + 0x0a, 0x0b, 0x7a, 0xb5, 0x68, 0x5c, 0xc0, 0x4a, 0xa2, 0x87, 0xea, 0x44, 0x0e, 0xe1, 0x61, 0x3a, + 0x9f, 0xdc, 0x1c, 0xf9, 0x54, 0xed, 0x84, 0xe4, 0xa8, 0xb0, 0xa0, 0x55, 0x73, 0xc6, 0x19, 0x54, + 0xf6, 0xdd, 0x77, 0xf2, 0xd4, 0xe5, 0x39, 0x4d, 0x27, 0xa8, 0x25, 0x12, 0x24, 0xdb, 0xa0, 0x0b, + 0xb8, 0x5a, 0x0b, 0x19, 0x83, 0x3a, 0xa6, 
0x0a, 0x95, 0x48, 0xe3, 0x7b, 0x78, 0x30, 0x8e, 0xa1, + 0xea, 0x78, 0x0d, 0x15, 0x21, 0x30, 0x7d, 0x25, 0x51, 0x23, 0xf6, 0xf8, 0x16, 0x77, 0x23, 0x43, + 0xba, 0x34, 0xe5, 0xc7, 0x38, 0x82, 0xda, 0xb7, 0x96, 0xc3, 0x05, 0x66, 0x8f, 0xb9, 0xf3, 0x16, + 0x31, 0xde, 0xd1, 0x39, 0xb1, 0x95, 0xc4, 0x8e, 0x36, 0xd6, 0x60, 0x25, 0xe1, 0x4b, 0x05, 0xd9, + 0x86, 0xea, 0xd7, 0xa3, 0xb1, 0x9a, 0x2f, 0x80, 0xb1, 0x0c, 0x0f, 0x63, 0x26, 0xca, 0xcf, 0x0b, + 0x58, 0xde, 0x1d, 0xb0, 0x20, 0x49, 0x8c, 0x3b, 0x5c, 0xad, 0x42, 0x6d, 0xda, 0x4a, 0x79, 0xe3, + 0xb0, 0x46, 0xf1, 0xdc, 0x89, 0x86, 0xec, 0xb5, 0xba, 0x71, 0xe7, 0xac, 0xfe, 0x25, 0x2c, 0x8e, + 0xee, 0x68, 0x41, 0x0b, 0xb9, 0x72, 0xd6, 0xe2, 0xad, 0x1f, 0x79, 0x8c, 0x78, 0x51, 0xee, 0x4f, + 0x7e, 0x8c, 0x06, 0xd4, 0xd3, 0x51, 0x55, 0x46, 0xd7, 0xb0, 0x18, 0xdf, 0x32, 0xd9, 0x37, 0xe1, + 0x36, 0xdc, 0x97, 0x71, 0x46, 0x73, 0xbb, 0x96, 0xde, 0x2a, 0x72, 0xa1, 0x8c, 0x70, 0xe4, 0x31, + 0x94, 0xd5, 0xd0, 0x8b, 0x5b, 0x4a, 0x5e, 0x91, 0x20, 0x45, 0xc7, 0xd6, 0x10, 0x8d, 0xdf, 0x34, + 0x78, 0x38, 0x89, 0xfc, 0xef, 0x67, 0x20, 0x33, 0xbb, 0xfc, 0x7f, 0xcb, 0xae, 0x90, 0xca, 0xae, + 0x06, 0x24, 0x9e, 0x9c, 0x6c, 0x56, 0xe7, 0x6f, 0x1d, 0x16, 0xf7, 0xa3, 0x01, 0xef, 0xca, 0x87, + 0x1c, 0xf9, 0x19, 0x96, 0xa6, 0x38, 0x4f, 0x32, 0xae, 0xc3, 0xac, 0xc5, 0xda, 0xf8, 0xf8, 0x4e, + 0x9c, 0x22, 0xdd, 0x31, 0xdc, 0x57, 0x3c, 0x24, 0xcd, 0xb4, 0xcd, 0xf4, 0x1a, 0x68, 0x7c, 0x34, + 0x03, 0xa1, 0xfc, 0xfd, 0x00, 0xd5, 0x2e, 0xf7, 0xd1, 0x1a, 0x3a, 0xee, 0xf9, 0xff, 0xe9, 0xb8, + 0xa5, 0x3d, 0xd7, 0xa2, 0x76, 0x4c, 0xb1, 0x31, 0xab, 0x1d, 0x59, 0xd4, 0xcf, 0x6a, 0x47, 0x26, + 0xad, 0xc9, 0x29, 0x94, 0xc6, 0x1c, 0x25, 0x46, 0xda, 0x2a, 0xc9, 0xf9, 0xc6, 0x93, 0x99, 0x18, + 0xe5, 0xf5, 0x27, 0x58, 0x8c, 0xd3, 0x95, 0x3c, 0xcd, 0x38, 0x9d, 0xf4, 0x12, 0x68, 0x6c, 0xdc, + 0x05, 0x53, 0xee, 0x1d, 0xa8, 0x26, 0xf9, 0x47, 0x3e, 0xc9, 0x7a, 0x77, 0x64, 0x6e, 0x86, 0xc6, + 0xe6, 0x3c, 0x50, 0x15, 0xea, 0x47, 0x80, 0xc9, 0xdc, 0x92, 0x27, 0xb3, 0x9e, 
0x14, 0x23, 0xf7, + 0xeb, 0xb3, 0x41, 0x6a, 0x4f, 0xe4, 0x7f, 0xcd, 0x69, 0x67, 0x45, 0xf1, 0x88, 0xfe, 0xec, 0x9f, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0xd3, 0x3a, 0x55, 0xe8, 0x0c, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/eager_service.proto b/executor/proto/tensorflow/core/protobuf/eager_service.proto new file mode 100644 index 0000000000..038ba3c1fc --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/eager_service.proto @@ -0,0 +1,235 @@ +syntax = "proto3"; + +package tensorflow.eager; + +import "tensorflow/core/framework/attr_value.proto"; +import "tensorflow/core/framework/device_attributes.proto"; +import "tensorflow/core/framework/function.proto"; +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; +import "tensorflow/core/framework/versions.proto"; +import "tensorflow/core/protobuf/tensorflow_server.proto"; + +message RemoteTensorHandle { + // The ID of the operation that produced this tensor. + int64 op_id = 1; + // The index into the outputs of the operation that produced this tensor. + int32 output_num = 2; + // Device of the operation that produced this tensor. Cannot be empty. + // For multi-device functions, it's the default device passed to placer. + string device = 3; + // Device where the tensor is located. Can be empty if the operation producing + // this tensor is a multi-device function. + string op_device = 4; + // Tensor type. + DataType dtype = 5; +} + +// A proto representation of an eager operation. +message Operation { + // A unique identifier for the operation. Set by the client so that the client + // can uniquely identify the outputs of the scheduled operation. + // + // In the initial implementation, sending duplicate IDs has undefined + // behaviour, but additional constraints may be placed upon this in the + // future. 
+ int64 id = 1; + string name = 2; + repeated RemoteTensorHandle inputs = 3; + + // Control Operation IDs that will be respected when ops are re-ordered by + // async execution. If async execution (+ op re-ordering) is not enabled, this + // should have no effect. + repeated int64 control_op_ids = 4; + map attrs = 5; + string device = 6; +} + +message QueueItem { + // The remote executor should be able to handle either executing ops directly, + // or releasing any unused tensor handles, since the tensor lifetime is + // maintained by the client. + oneof item { + RemoteTensorHandle handle_to_decref = 1; + Operation operation = 2; + SendTensorOp send_tensor = 3; + } +} + +message QueueResponse { + repeated TensorShapeProto shape = 1; +} + +message CreateContextRequest { + // Identifies the full cluster, and this particular worker's position within. + ServerDef server_def = 1; + + // Whether the ops on the worker should be executed synchronously or + // asynchronously. By default, ops are executed synchronously. + bool async = 2; + + // Number of seconds to keep the context alive. If more than keep_alive_secs + // has passed since a particular context has been communicated with, it will + // be garbage collected. + int64 keep_alive_secs = 3; + + // This is the version for all the ops that will be enqueued by the client. + VersionDef version_def = 4; + + // Device attributes in the cluster + repeated DeviceAttributes cluster_device_attributes = 6; + + // The ID of the created context. This is usually a randomly generated number, + // that will be used to identify the context in future requests to the + // service. Contexts are not persisted through server restarts. + // This ID will be used for all future communications as well. It is essential + // that both ends use this ID for selecting a rendezvous to get everything to + // match. 
+ fixed64 context_id = 7; + + reserved 5; +} + +message CreateContextResponse { + // List of devices that are locally accessible to the worker. + repeated DeviceAttributes device_attributes = 2; + + reserved 1; +} + +message EnqueueRequest { + fixed64 context_id = 1; + + repeated QueueItem queue = 3; +} + +message EnqueueResponse { + // A single operation response for every item in the request. + repeated QueueResponse queue_response = 1; +} + +message WaitQueueDoneRequest { + fixed64 context_id = 1; + + // Ids to wait on. If empty, wait on everything currently pending. + repeated int64 op_id = 2; +} + +message WaitQueueDoneResponse { + // TODO(nareshmodi): Consider adding NodeExecStats here to be able to + // propagate some stats. +} + +message KeepAliveRequest { + fixed64 context_id = 1; +} + +message KeepAliveResponse {} + +message CloseContextRequest { + fixed64 context_id = 1; +} + +message CloseContextResponse {} + +message RegisterFunctionRequest { + fixed64 context_id = 1; + + FunctionDef function_def = 2; +} + +message RegisterFunctionResponse {} + +message SendTensorOp { + // All remote tensors are identified by . To mimic this + // situation when directly sending tensors, we include an "artificial" op ID + // (which would have corresponded to the _Recv op when not using SendTensor). + int64 op_id = 1; + // The index within the repeated field is the output number that will help + // uniquely identify (along with the above op_id) the particular tensor. + repeated TensorProto tensors = 2; + + // The device on which the tensors should be resident. + string device_name = 3; +} + +message SendTensorRequest { + fixed64 context_id = 1; + + // All remote tensors are identified by . To mimic this + // situation when directly sending tensors, we include an "artificial" op ID + // (which would have corresponded to the _Recv op when not using SendTensor). 
+ int64 op_id = 2; + // The index within the repeated field is the output number that will help + // uniquely identify (along with the above op_id) the particular tensor. + repeated TensorProto tensors = 3; + + // The device on which the tensors should be resident. + string device_name = 4; +} + +message SendTensorResponse {} + +//////////////////////////////////////////////////////////////////////////////// +// +// Eager Service defines a TensorFlow service that executes operations eagerly +// on a set of local devices, on behalf of a remote Eager executor. +// +// The service impl will keep track of the various clients and devices it has +// access to and allows the client to enqueue ops on any devices that it is able +// to access and schedule data transfers from/to any of the peers. +// +// A client can generate multiple contexts to be able to independently execute +// operations, but cannot share data between the two contexts. +// +// NOTE: Even though contexts generated by clients should be independent, the +// lower level tensorflow execution engine is not, so they might share some data +// (e.g. a Device's ResourceMgr). +// +//////////////////////////////////////////////////////////////////////////////// +service EagerService { + // This initializes the worker, informing it about the other workers in the + // cluster and exchanging authentication tokens which will be used in all + // other RPCs to detect whether the worker has restarted. + rpc CreateContext(CreateContextRequest) returns (CreateContextResponse); + + // This takes a list of Execute and DeleteTensorHandle operations and enqueues + // (in async mode) or executes (in sync mode) them on the remote server. + // All outputs of ops which were not explicitly deleted with + // DeleteTensorHandle entries will be assumed to be alive and are usable by + // future calls to Enqueue. + rpc Enqueue(EnqueueRequest) returns (EnqueueResponse); + + // A streaming version of Enqueue. 
+ // Current server implementation sends one response per received request. + // The benefit for using a streaming version is that subsequent requests + // can be sent without waiting for a response to the previous request. This + // synchronization is required in the regular Enqueue call because gRPC does + // not guarantee to preserve request order. + rpc StreamingEnqueue(stream EnqueueRequest) returns (stream EnqueueResponse); + + // Takes a set of op IDs and waits until those ops are done. Returns any error + // in the stream so far. + rpc WaitQueueDone(WaitQueueDoneRequest) returns (WaitQueueDoneResponse); + + // Contexts are always created with a deadline and no RPCs within a deadline + // will trigger a context garbage collection. KeepAlive calls can be used to + // delay this. + rpc KeepAlive(KeepAliveRequest) returns (KeepAliveResponse); + + // Closes the context. No calls to other methods using the existing context ID + // are valid after this. + rpc CloseContext(CloseContextRequest) returns (CloseContextResponse); + + // Takes a FunctionDef and makes it enqueable on the remote worker. + rpc RegisterFunction(RegisterFunctionRequest) + returns (RegisterFunctionResponse); + + // TODO(fishx): Remove this method. + // An RPC to push tensors to the server. At times, certain environments don't + // allow the server to connect back to the client. + rpc SendTensor(SendTensorRequest) returns (SendTensorResponse) { + option deprecated = true; + } +} diff --git a/executor/proto/tensorflow/core/protobuf/graph_debug_info.pb.go b/executor/proto/tensorflow/core/protobuf/graph_debug_info.pb.go new file mode 100644 index 0000000000..3e6dfaa20b --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/graph_debug_info.pb.go @@ -0,0 +1,226 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow/core/protobuf/graph_debug_info.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type GraphDebugInfo struct { + // This stores all the source code file names and can be indexed by the + // `file_index`. + Files []string `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` + // This maps a node name to a stack trace in the source code. + Traces map[string]*GraphDebugInfo_StackTrace `protobuf:"bytes,2,rep,name=traces,proto3" json:"traces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphDebugInfo) Reset() { *m = GraphDebugInfo{} } +func (m *GraphDebugInfo) String() string { return proto.CompactTextString(m) } +func (*GraphDebugInfo) ProtoMessage() {} +func (*GraphDebugInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2d49d5c184d173e1, []int{0} +} + +func (m *GraphDebugInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphDebugInfo.Unmarshal(m, b) +} +func (m *GraphDebugInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphDebugInfo.Marshal(b, m, deterministic) +} +func (m *GraphDebugInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphDebugInfo.Merge(m, src) +} +func (m *GraphDebugInfo) XXX_Size() int { + return xxx_messageInfo_GraphDebugInfo.Size(m) +} +func (m 
*GraphDebugInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GraphDebugInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphDebugInfo proto.InternalMessageInfo + +func (m *GraphDebugInfo) GetFiles() []string { + if m != nil { + return m.Files + } + return nil +} + +func (m *GraphDebugInfo) GetTraces() map[string]*GraphDebugInfo_StackTrace { + if m != nil { + return m.Traces + } + return nil +} + +// This represents a file/line location in the source code. +type GraphDebugInfo_FileLineCol struct { + // File name index, which can be used to retrieve the file name string from + // `files`. The value should be between 0 and (len(files)-1) + FileIndex int32 `protobuf:"varint,1,opt,name=file_index,json=fileIndex,proto3" json:"file_index,omitempty"` + // Line number in the file. + Line int32 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"` + // Col number in the file line. + Col int32 `protobuf:"varint,3,opt,name=col,proto3" json:"col,omitempty"` + // Name of function contains the file line. + Func string `protobuf:"bytes,4,opt,name=func,proto3" json:"func,omitempty"` + // Source code contained in this file line. 
+ Code string `protobuf:"bytes,5,opt,name=code,proto3" json:"code,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphDebugInfo_FileLineCol) Reset() { *m = GraphDebugInfo_FileLineCol{} } +func (m *GraphDebugInfo_FileLineCol) String() string { return proto.CompactTextString(m) } +func (*GraphDebugInfo_FileLineCol) ProtoMessage() {} +func (*GraphDebugInfo_FileLineCol) Descriptor() ([]byte, []int) { + return fileDescriptor_2d49d5c184d173e1, []int{0, 0} +} + +func (m *GraphDebugInfo_FileLineCol) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphDebugInfo_FileLineCol.Unmarshal(m, b) +} +func (m *GraphDebugInfo_FileLineCol) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphDebugInfo_FileLineCol.Marshal(b, m, deterministic) +} +func (m *GraphDebugInfo_FileLineCol) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphDebugInfo_FileLineCol.Merge(m, src) +} +func (m *GraphDebugInfo_FileLineCol) XXX_Size() int { + return xxx_messageInfo_GraphDebugInfo_FileLineCol.Size(m) +} +func (m *GraphDebugInfo_FileLineCol) XXX_DiscardUnknown() { + xxx_messageInfo_GraphDebugInfo_FileLineCol.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphDebugInfo_FileLineCol proto.InternalMessageInfo + +func (m *GraphDebugInfo_FileLineCol) GetFileIndex() int32 { + if m != nil { + return m.FileIndex + } + return 0 +} + +func (m *GraphDebugInfo_FileLineCol) GetLine() int32 { + if m != nil { + return m.Line + } + return 0 +} + +func (m *GraphDebugInfo_FileLineCol) GetCol() int32 { + if m != nil { + return m.Col + } + return 0 +} + +func (m *GraphDebugInfo_FileLineCol) GetFunc() string { + if m != nil { + return m.Func + } + return "" +} + +func (m *GraphDebugInfo_FileLineCol) GetCode() string { + if m != nil { + return m.Code + } + return "" +} + +// This represents a stack trace which is a ordered list of `FileLineCol`. 
+type GraphDebugInfo_StackTrace struct { + // Each line in the stack trace. + FileLineCols []*GraphDebugInfo_FileLineCol `protobuf:"bytes,1,rep,name=file_line_cols,json=fileLineCols,proto3" json:"file_line_cols,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GraphDebugInfo_StackTrace) Reset() { *m = GraphDebugInfo_StackTrace{} } +func (m *GraphDebugInfo_StackTrace) String() string { return proto.CompactTextString(m) } +func (*GraphDebugInfo_StackTrace) ProtoMessage() {} +func (*GraphDebugInfo_StackTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_2d49d5c184d173e1, []int{0, 1} +} + +func (m *GraphDebugInfo_StackTrace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GraphDebugInfo_StackTrace.Unmarshal(m, b) +} +func (m *GraphDebugInfo_StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GraphDebugInfo_StackTrace.Marshal(b, m, deterministic) +} +func (m *GraphDebugInfo_StackTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_GraphDebugInfo_StackTrace.Merge(m, src) +} +func (m *GraphDebugInfo_StackTrace) XXX_Size() int { + return xxx_messageInfo_GraphDebugInfo_StackTrace.Size(m) +} +func (m *GraphDebugInfo_StackTrace) XXX_DiscardUnknown() { + xxx_messageInfo_GraphDebugInfo_StackTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_GraphDebugInfo_StackTrace proto.InternalMessageInfo + +func (m *GraphDebugInfo_StackTrace) GetFileLineCols() []*GraphDebugInfo_FileLineCol { + if m != nil { + return m.FileLineCols + } + return nil +} + +func init() { + proto.RegisterType((*GraphDebugInfo)(nil), "tensorflow.GraphDebugInfo") + proto.RegisterMapType((map[string]*GraphDebugInfo_StackTrace)(nil), "tensorflow.GraphDebugInfo.TracesEntry") + proto.RegisterType((*GraphDebugInfo_FileLineCol)(nil), "tensorflow.GraphDebugInfo.FileLineCol") + proto.RegisterType((*GraphDebugInfo_StackTrace)(nil), 
"tensorflow.GraphDebugInfo.StackTrace") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/graph_debug_info.proto", fileDescriptor_2d49d5c184d173e1) +} + +var fileDescriptor_2d49d5c184d173e1 = []byte{ + // 331 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x51, 0x4f, 0x4b, 0xc3, 0x30, + 0x1c, 0x25, 0xeb, 0x3a, 0xe8, 0xaf, 0x32, 0x24, 0xec, 0x10, 0x06, 0x42, 0x11, 0x94, 0x9e, 0x5a, + 0x98, 0x08, 0xa2, 0xe0, 0x61, 0xfe, 0x63, 0xb0, 0xc3, 0x88, 0x9e, 0xbc, 0xd4, 0x2e, 0x4b, 0x66, + 0x59, 0x4c, 0x46, 0xda, 0x3a, 0xe7, 0xa7, 0xf1, 0x63, 0x7a, 0x94, 0xa4, 0x42, 0xb7, 0xcb, 0x6e, + 0x2f, 0x2f, 0xef, 0xbd, 0xbc, 0x47, 0x20, 0xad, 0xb8, 0x2a, 0xb5, 0x11, 0x52, 0x6f, 0x52, 0xa6, + 0x0d, 0x4f, 0xd7, 0x46, 0x57, 0x7a, 0x5e, 0x8b, 0x74, 0x69, 0xf2, 0xf5, 0x7b, 0xb6, 0xe0, 0xf3, + 0x7a, 0x99, 0x15, 0x4a, 0xe8, 0xc4, 0xdd, 0x60, 0x68, 0x0d, 0xa7, 0x3f, 0x1e, 0xf4, 0x9f, 0xac, + 0xec, 0xde, 0xaa, 0x26, 0x4a, 0x68, 0x3c, 0x00, 0x5f, 0x14, 0x92, 0x97, 0x04, 0x45, 0x5e, 0x1c, + 0xd0, 0xe6, 0x80, 0x6f, 0xa1, 0x57, 0x99, 0x9c, 0xf1, 0x92, 0x74, 0x22, 0x2f, 0x0e, 0x47, 0xe7, + 0x49, 0x9b, 0x92, 0xec, 0x27, 0x24, 0x2f, 0x4e, 0xf8, 0xa0, 0x2a, 0xb3, 0xa5, 0xff, 0xae, 0xe1, + 0x37, 0x84, 0x8f, 0x85, 0xe4, 0xd3, 0x42, 0xf1, 0x3b, 0x2d, 0xf1, 0x09, 0x80, 0xcd, 0xcd, 0x0a, + 0xb5, 0xe0, 0x5f, 0x04, 0x45, 0x28, 0xf6, 0x69, 0x60, 0x99, 0x89, 0x25, 0x30, 0x86, 0xae, 0x2c, + 0x14, 0x27, 0x1d, 0x77, 0xe1, 0x30, 0x3e, 0x06, 0x8f, 0x69, 0x49, 0x3c, 0x47, 0x59, 0x68, 0x55, + 0xa2, 0x56, 0x8c, 0x74, 0x23, 0x14, 0x07, 0xd4, 0x61, 0xcb, 0x31, 0xbd, 0xe0, 0xc4, 0x6f, 0x38, + 0x8b, 0x87, 0xaf, 0x00, 0xcf, 0x55, 0xce, 0x56, 0xae, 0x17, 0x9e, 0x42, 0xdf, 0x3d, 0x6d, 0x43, + 0x33, 0xa6, 0x65, 0x33, 0xf4, 0xf0, 0xa2, 0x9d, 0xea, 0xf4, 0x48, 0xb4, 0x87, 0x72, 0xf8, 0x06, + 0xe1, 0xce, 0x5c, 0x5b, 0x72, 0xc5, 0xb7, 0x6e, 0x50, 0x40, 0x2d, 0xc4, 0x37, 0xe0, 0x7f, 0xe6, + 0xb2, 0x6e, 0xb6, 0x84, 0xa3, 0xb3, 0x03, 0xaf, 0xb4, 0x25, 0x69, 
0xe3, 0xb9, 0xee, 0x5c, 0xa1, + 0xf1, 0x25, 0x10, 0x6d, 0x96, 0xbb, 0x36, 0x61, 0xf2, 0x0f, 0xbe, 0xd1, 0x66, 0x35, 0x1e, 0xec, + 0x27, 0xcc, 0xec, 0xff, 0x96, 0x33, 0xf4, 0x8b, 0xd0, 0xbc, 0xe7, 0x3e, 0xfb, 0xe2, 0x2f, 0x00, + 0x00, 0xff, 0xff, 0x38, 0x4d, 0xa8, 0x46, 0x1f, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/graph_debug_info.proto b/executor/proto/tensorflow/core/protobuf/graph_debug_info.proto new file mode 100644 index 0000000000..45f930cfac --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/graph_debug_info.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "GraphDebugInfoProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; + +message GraphDebugInfo { + // This represents a file/line location in the source code. + message FileLineCol { + // File name index, which can be used to retrieve the file name string from + // `files`. The value should be between 0 and (len(files)-1) + int32 file_index = 1; + + // Line number in the file. + int32 line = 2; + + // Col number in the file line. + int32 col = 3; + + // Name of function contains the file line. + string func = 4; + + // Source code contained in this file line. + string code = 5; + } + + // This represents a stack trace which is a ordered list of `FileLineCol`. + message StackTrace { + // Each line in the stack trace. + repeated FileLineCol file_line_cols = 1; + } + + // This stores all the source code file names and can be indexed by the + // `file_index`. + repeated string files = 1; + + // This maps a node name to a stack trace in the source code. 
+ map traces = 2; +} diff --git a/executor/proto/tensorflow/core/protobuf/master.pb.go b/executor/proto/tensorflow/core/protobuf/master.pb.go new file mode 100644 index 0000000000..9f086d203f --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/master.pb.go @@ -0,0 +1,1211 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/master.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + core "github.com/tensorflow/tensorflow/tensorflow/go/core/lib/core" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type CreateSessionRequest struct { + // The initial graph definition. + GraphDef *framework.GraphDef `protobuf:"bytes,1,opt,name=graph_def,json=graphDef,proto3" json:"graph_def,omitempty"` + // Configuration options. + Config *ConfigProto `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + // The target string used from the client's perspective. 
+ Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSessionRequest) Reset() { *m = CreateSessionRequest{} } +func (m *CreateSessionRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSessionRequest) ProtoMessage() {} +func (*CreateSessionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{0} +} + +func (m *CreateSessionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSessionRequest.Unmarshal(m, b) +} +func (m *CreateSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSessionRequest.Marshal(b, m, deterministic) +} +func (m *CreateSessionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSessionRequest.Merge(m, src) +} +func (m *CreateSessionRequest) XXX_Size() int { + return xxx_messageInfo_CreateSessionRequest.Size(m) +} +func (m *CreateSessionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSessionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSessionRequest proto.InternalMessageInfo + +func (m *CreateSessionRequest) GetGraphDef() *framework.GraphDef { + if m != nil { + return m.GraphDef + } + return nil +} + +func (m *CreateSessionRequest) GetConfig() *ConfigProto { + if m != nil { + return m.Config + } + return nil +} + +func (m *CreateSessionRequest) GetTarget() string { + if m != nil { + return m.Target + } + return "" +} + +type CreateSessionResponse struct { + // The session handle to be used in subsequent calls for the created session. + // + // The client must arrange to call CloseSession with this returned + // session handle to close the session. 
+ SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // The initial version number for the graph, to be used in the next call + // to ExtendSession. + GraphVersion int64 `protobuf:"varint,2,opt,name=graph_version,json=graphVersion,proto3" json:"graph_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSessionResponse) Reset() { *m = CreateSessionResponse{} } +func (m *CreateSessionResponse) String() string { return proto.CompactTextString(m) } +func (*CreateSessionResponse) ProtoMessage() {} +func (*CreateSessionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{1} +} + +func (m *CreateSessionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSessionResponse.Unmarshal(m, b) +} +func (m *CreateSessionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSessionResponse.Marshal(b, m, deterministic) +} +func (m *CreateSessionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSessionResponse.Merge(m, src) +} +func (m *CreateSessionResponse) XXX_Size() int { + return xxx_messageInfo_CreateSessionResponse.Size(m) +} +func (m *CreateSessionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSessionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSessionResponse proto.InternalMessageInfo + +func (m *CreateSessionResponse) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +func (m *CreateSessionResponse) GetGraphVersion() int64 { + if m != nil { + return m.GraphVersion + } + return 0 +} + +type ExtendSessionRequest struct { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. 
+ SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // REQUIRED: The nodes to be added to the session's graph. If any node has + // the same name as an existing node, the operation will fail with + // ILLEGAL_ARGUMENT. + GraphDef *framework.GraphDef `protobuf:"bytes,2,opt,name=graph_def,json=graphDef,proto3" json:"graph_def,omitempty"` + // REQUIRED: The version number of the graph to be extended. This will be + // tested against the current server-side version number, and the operation + // will fail with FAILED_PRECONDITION if they do not match. + CurrentGraphVersion int64 `protobuf:"varint,3,opt,name=current_graph_version,json=currentGraphVersion,proto3" json:"current_graph_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtendSessionRequest) Reset() { *m = ExtendSessionRequest{} } +func (m *ExtendSessionRequest) String() string { return proto.CompactTextString(m) } +func (*ExtendSessionRequest) ProtoMessage() {} +func (*ExtendSessionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{2} +} + +func (m *ExtendSessionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtendSessionRequest.Unmarshal(m, b) +} +func (m *ExtendSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtendSessionRequest.Marshal(b, m, deterministic) +} +func (m *ExtendSessionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendSessionRequest.Merge(m, src) +} +func (m *ExtendSessionRequest) XXX_Size() int { + return xxx_messageInfo_ExtendSessionRequest.Size(m) +} +func (m *ExtendSessionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendSessionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtendSessionRequest proto.InternalMessageInfo + +func (m *ExtendSessionRequest) GetSessionHandle() string { + if 
m != nil { + return m.SessionHandle + } + return "" +} + +func (m *ExtendSessionRequest) GetGraphDef() *framework.GraphDef { + if m != nil { + return m.GraphDef + } + return nil +} + +func (m *ExtendSessionRequest) GetCurrentGraphVersion() int64 { + if m != nil { + return m.CurrentGraphVersion + } + return 0 +} + +type ExtendSessionResponse struct { + // The new version number for the extended graph, to be used in the next call + // to ExtendSession. + NewGraphVersion int64 `protobuf:"varint,4,opt,name=new_graph_version,json=newGraphVersion,proto3" json:"new_graph_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtendSessionResponse) Reset() { *m = ExtendSessionResponse{} } +func (m *ExtendSessionResponse) String() string { return proto.CompactTextString(m) } +func (*ExtendSessionResponse) ProtoMessage() {} +func (*ExtendSessionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{3} +} + +func (m *ExtendSessionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtendSessionResponse.Unmarshal(m, b) +} +func (m *ExtendSessionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtendSessionResponse.Marshal(b, m, deterministic) +} +func (m *ExtendSessionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendSessionResponse.Merge(m, src) +} +func (m *ExtendSessionResponse) XXX_Size() int { + return xxx_messageInfo_ExtendSessionResponse.Size(m) +} +func (m *ExtendSessionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendSessionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtendSessionResponse proto.InternalMessageInfo + +func (m *ExtendSessionResponse) GetNewGraphVersion() int64 { + if m != nil { + return m.NewGraphVersion + } + return 0 +} + +type RunStepRequest struct { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the 
same master service. + SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // Tensors to be fed in the step. Each feed is a named tensor. + Feed []*NamedTensorProto `protobuf:"bytes,2,rep,name=feed,proto3" json:"feed,omitempty"` + // Fetches. A list of tensor names. The caller expects a tensor to + // be returned for each fetch[i] (see RunStepResponse.tensor). The + // order of specified fetches does not change the execution order. + Fetch []string `protobuf:"bytes,3,rep,name=fetch,proto3" json:"fetch,omitempty"` + // Target Nodes. A list of node names. The named nodes will be run + // to but their outputs will not be fetched. + Target []string `protobuf:"bytes,4,rep,name=target,proto3" json:"target,omitempty"` + // Options for the run call. + Options *RunOptions `protobuf:"bytes,5,opt,name=options,proto3" json:"options,omitempty"` + // Partial run handle (optional). If specified, this will be a partial run + // execution, run up to the specified fetches. + PartialRunHandle string `protobuf:"bytes,6,opt,name=partial_run_handle,json=partialRunHandle,proto3" json:"partial_run_handle,omitempty"` + // If true then some errors, e.g., execution errors that have long + // error messages, may return an OK RunStepResponse with the actual + // error saved in the status_code/status_error_message fields of the + // response body. This is a workaround since the RPC subsystem may + // truncate long metadata messages. + StoreErrorsInResponseBody bool `protobuf:"varint,7,opt,name=store_errors_in_response_body,json=storeErrorsInResponseBody,proto3" json:"store_errors_in_response_body,omitempty"` + // Unique identifier for this request. Every RunStepRequest must + // have a unique request_id, and retried RunStepRequest must have + // the same request_id. If request_id is zero, retry detection is disabled. 
+ RequestId int64 `protobuf:"varint,8,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunStepRequest) Reset() { *m = RunStepRequest{} } +func (m *RunStepRequest) String() string { return proto.CompactTextString(m) } +func (*RunStepRequest) ProtoMessage() {} +func (*RunStepRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{4} +} + +func (m *RunStepRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunStepRequest.Unmarshal(m, b) +} +func (m *RunStepRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunStepRequest.Marshal(b, m, deterministic) +} +func (m *RunStepRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunStepRequest.Merge(m, src) +} +func (m *RunStepRequest) XXX_Size() int { + return xxx_messageInfo_RunStepRequest.Size(m) +} +func (m *RunStepRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RunStepRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RunStepRequest proto.InternalMessageInfo + +func (m *RunStepRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +func (m *RunStepRequest) GetFeed() []*NamedTensorProto { + if m != nil { + return m.Feed + } + return nil +} + +func (m *RunStepRequest) GetFetch() []string { + if m != nil { + return m.Fetch + } + return nil +} + +func (m *RunStepRequest) GetTarget() []string { + if m != nil { + return m.Target + } + return nil +} + +func (m *RunStepRequest) GetOptions() *RunOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *RunStepRequest) GetPartialRunHandle() string { + if m != nil { + return m.PartialRunHandle + } + return "" +} + +func (m *RunStepRequest) GetStoreErrorsInResponseBody() bool { + if m != nil { + return m.StoreErrorsInResponseBody + } + return false +} + +func (m 
*RunStepRequest) GetRequestId() int64 { + if m != nil { + return m.RequestId + } + return 0 +} + +type RunStepResponse struct { + // NOTE: The order of the returned tensors may or may not match + // the fetch order specified in RunStepRequest. + Tensor []*NamedTensorProto `protobuf:"bytes,1,rep,name=tensor,proto3" json:"tensor,omitempty"` + // Returned metadata if requested in the options. + Metadata *RunMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // If store_errors_in_response_body is true in the request, then + // optionally the server may return an OK status for the RPC and + // fill the true status into the fields below, to allow for messages + // that are too long to fit in metadata. + StatusCode core.Code `protobuf:"varint,3,opt,name=status_code,json=statusCode,proto3,enum=tensorflow.error.Code" json:"status_code,omitempty"` + StatusErrorMessage string `protobuf:"bytes,4,opt,name=status_error_message,json=statusErrorMessage,proto3" json:"status_error_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunStepResponse) Reset() { *m = RunStepResponse{} } +func (m *RunStepResponse) String() string { return proto.CompactTextString(m) } +func (*RunStepResponse) ProtoMessage() {} +func (*RunStepResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{5} +} + +func (m *RunStepResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunStepResponse.Unmarshal(m, b) +} +func (m *RunStepResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunStepResponse.Marshal(b, m, deterministic) +} +func (m *RunStepResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunStepResponse.Merge(m, src) +} +func (m *RunStepResponse) XXX_Size() int { + return xxx_messageInfo_RunStepResponse.Size(m) +} +func (m *RunStepResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_RunStepResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RunStepResponse proto.InternalMessageInfo + +func (m *RunStepResponse) GetTensor() []*NamedTensorProto { + if m != nil { + return m.Tensor + } + return nil +} + +func (m *RunStepResponse) GetMetadata() *RunMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *RunStepResponse) GetStatusCode() core.Code { + if m != nil { + return m.StatusCode + } + return core.Code_OK +} + +func (m *RunStepResponse) GetStatusErrorMessage() string { + if m != nil { + return m.StatusErrorMessage + } + return "" +} + +type PartialRunSetupRequest struct { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. + SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // Tensors to be fed in future steps. + Feed []string `protobuf:"bytes,2,rep,name=feed,proto3" json:"feed,omitempty"` + // Fetches. A list of tensor names. The caller expects a tensor to be returned + // for each fetch[i] (see RunStepResponse.tensor), for corresponding partial + // RunStepRequests. The order of specified fetches does not change the + // execution order. + Fetch []string `protobuf:"bytes,3,rep,name=fetch,proto3" json:"fetch,omitempty"` + // Target Nodes. A list of node names. The named nodes will be run in future + // steps, but their outputs will not be fetched. + Target []string `protobuf:"bytes,4,rep,name=target,proto3" json:"target,omitempty"` + // Unique identifier for this request. Every PartialRunSetupRequest must + // have a unique request_id, and retried PartialRunSetupRequest must have + // the same request_id. If request_id is zero, retry detection is disabled. 
+ RequestId int64 `protobuf:"varint,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PartialRunSetupRequest) Reset() { *m = PartialRunSetupRequest{} } +func (m *PartialRunSetupRequest) String() string { return proto.CompactTextString(m) } +func (*PartialRunSetupRequest) ProtoMessage() {} +func (*PartialRunSetupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{6} +} + +func (m *PartialRunSetupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PartialRunSetupRequest.Unmarshal(m, b) +} +func (m *PartialRunSetupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PartialRunSetupRequest.Marshal(b, m, deterministic) +} +func (m *PartialRunSetupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialRunSetupRequest.Merge(m, src) +} +func (m *PartialRunSetupRequest) XXX_Size() int { + return xxx_messageInfo_PartialRunSetupRequest.Size(m) +} +func (m *PartialRunSetupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PartialRunSetupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialRunSetupRequest proto.InternalMessageInfo + +func (m *PartialRunSetupRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +func (m *PartialRunSetupRequest) GetFeed() []string { + if m != nil { + return m.Feed + } + return nil +} + +func (m *PartialRunSetupRequest) GetFetch() []string { + if m != nil { + return m.Fetch + } + return nil +} + +func (m *PartialRunSetupRequest) GetTarget() []string { + if m != nil { + return m.Target + } + return nil +} + +func (m *PartialRunSetupRequest) GetRequestId() int64 { + if m != nil { + return m.RequestId + } + return 0 +} + +type PartialRunSetupResponse struct { + // The unique handle corresponding to the ongoing partial run call setup by + // the 
invocation to PartialRunSetup. This handle may be passed to + // RunStepRequest to send and receive tensors for this partial run. + PartialRunHandle string `protobuf:"bytes,1,opt,name=partial_run_handle,json=partialRunHandle,proto3" json:"partial_run_handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PartialRunSetupResponse) Reset() { *m = PartialRunSetupResponse{} } +func (m *PartialRunSetupResponse) String() string { return proto.CompactTextString(m) } +func (*PartialRunSetupResponse) ProtoMessage() {} +func (*PartialRunSetupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{7} +} + +func (m *PartialRunSetupResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PartialRunSetupResponse.Unmarshal(m, b) +} +func (m *PartialRunSetupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PartialRunSetupResponse.Marshal(b, m, deterministic) +} +func (m *PartialRunSetupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialRunSetupResponse.Merge(m, src) +} +func (m *PartialRunSetupResponse) XXX_Size() int { + return xxx_messageInfo_PartialRunSetupResponse.Size(m) +} +func (m *PartialRunSetupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PartialRunSetupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialRunSetupResponse proto.InternalMessageInfo + +func (m *PartialRunSetupResponse) GetPartialRunHandle() string { + if m != nil { + return m.PartialRunHandle + } + return "" +} + +type CloseSessionRequest struct { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. 
+ SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseSessionRequest) Reset() { *m = CloseSessionRequest{} } +func (m *CloseSessionRequest) String() string { return proto.CompactTextString(m) } +func (*CloseSessionRequest) ProtoMessage() {} +func (*CloseSessionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{8} +} + +func (m *CloseSessionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseSessionRequest.Unmarshal(m, b) +} +func (m *CloseSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseSessionRequest.Marshal(b, m, deterministic) +} +func (m *CloseSessionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseSessionRequest.Merge(m, src) +} +func (m *CloseSessionRequest) XXX_Size() int { + return xxx_messageInfo_CloseSessionRequest.Size(m) +} +func (m *CloseSessionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CloseSessionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseSessionRequest proto.InternalMessageInfo + +func (m *CloseSessionRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +type CloseSessionResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseSessionResponse) Reset() { *m = CloseSessionResponse{} } +func (m *CloseSessionResponse) String() string { return proto.CompactTextString(m) } +func (*CloseSessionResponse) ProtoMessage() {} +func (*CloseSessionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{9} +} + +func (m *CloseSessionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseSessionResponse.Unmarshal(m, b) +} +func (m 
*CloseSessionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseSessionResponse.Marshal(b, m, deterministic) +} +func (m *CloseSessionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseSessionResponse.Merge(m, src) +} +func (m *CloseSessionResponse) XXX_Size() int { + return xxx_messageInfo_CloseSessionResponse.Size(m) +} +func (m *CloseSessionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CloseSessionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseSessionResponse proto.InternalMessageInfo + +// Reset() allows misbehaving or slow sessions to be aborted and closed, and +// causes their resources eventually to be released. Reset() does not wait +// for the computations in old sessions to cease; it merely starts the +// process of tearing them down. However, if a new session is started after +// a Reset(), the new session is isolated from changes that old sessions +// (started prior to the Reset()) may continue to make to resources, provided +// all those resources are in containers listed in "containers". +// +// Old sessions may continue to have side-effects on resources not in +// containers listed in "containers", and thus may affect future +// sessions' results in ways that are hard to predict. Thus, if well-defined +// behavior is desired, is it recommended that all containers be listed in +// "containers". Similarly, if a device_filter is specified, results may be +// hard to predict. +type ResetRequest struct { + // A list of container names, which may be empty. + // + // If 'container' is not empty, releases resources in the given + // containers in all devices. + // + // If 'container' is empty, releases resources in the default + // container in all devices. + Container []string `protobuf:"bytes,1,rep,name=container,proto3" json:"container,omitempty"` + // When any filters are present, only devices that match the filters + // will be reset. 
Each filter can be partially specified, + // e.g. "/job:ps" "/job:worker/replica:3", etc. + DeviceFilters []string `protobuf:"bytes,2,rep,name=device_filters,json=deviceFilters,proto3" json:"device_filters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetRequest) Reset() { *m = ResetRequest{} } +func (m *ResetRequest) String() string { return proto.CompactTextString(m) } +func (*ResetRequest) ProtoMessage() {} +func (*ResetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{10} +} + +func (m *ResetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResetRequest.Unmarshal(m, b) +} +func (m *ResetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResetRequest.Marshal(b, m, deterministic) +} +func (m *ResetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetRequest.Merge(m, src) +} +func (m *ResetRequest) XXX_Size() int { + return xxx_messageInfo_ResetRequest.Size(m) +} +func (m *ResetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetRequest proto.InternalMessageInfo + +func (m *ResetRequest) GetContainer() []string { + if m != nil { + return m.Container + } + return nil +} + +func (m *ResetRequest) GetDeviceFilters() []string { + if m != nil { + return m.DeviceFilters + } + return nil +} + +type ResetResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetResponse) Reset() { *m = ResetResponse{} } +func (m *ResetResponse) String() string { return proto.CompactTextString(m) } +func (*ResetResponse) ProtoMessage() {} +func (*ResetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{11} +} + +func (m *ResetResponse) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_ResetResponse.Unmarshal(m, b) +} +func (m *ResetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResetResponse.Marshal(b, m, deterministic) +} +func (m *ResetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetResponse.Merge(m, src) +} +func (m *ResetResponse) XXX_Size() int { + return xxx_messageInfo_ResetResponse.Size(m) +} +func (m *ResetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetResponse proto.InternalMessageInfo + +type ListDevicesRequest struct { + // Optional: session_handle must be returned by a CreateSession call to the + // same master service. + // + // When session_handle is empty, the ClusterSpec provided when the master was + // started is used to compute the available devices. If the session_handle is + // provided but not recognized, an error is returned. Finally, if a valid + // session_handle is provided, the cluster configuration for that session is + // used when computing the response. 
+ SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDevicesRequest) Reset() { *m = ListDevicesRequest{} } +func (m *ListDevicesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDevicesRequest) ProtoMessage() {} +func (*ListDevicesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{12} +} + +func (m *ListDevicesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDevicesRequest.Unmarshal(m, b) +} +func (m *ListDevicesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDevicesRequest.Marshal(b, m, deterministic) +} +func (m *ListDevicesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDevicesRequest.Merge(m, src) +} +func (m *ListDevicesRequest) XXX_Size() int { + return xxx_messageInfo_ListDevicesRequest.Size(m) +} +func (m *ListDevicesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListDevicesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDevicesRequest proto.InternalMessageInfo + +func (m *ListDevicesRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +type ListDevicesResponse struct { + LocalDevice []*framework.DeviceAttributes `protobuf:"bytes,1,rep,name=local_device,json=localDevice,proto3" json:"local_device,omitempty"` + RemoteDevice []*framework.DeviceAttributes `protobuf:"bytes,2,rep,name=remote_device,json=remoteDevice,proto3" json:"remote_device,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListDevicesResponse) Reset() { *m = ListDevicesResponse{} } +func (m *ListDevicesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDevicesResponse) 
ProtoMessage() {} +func (*ListDevicesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{13} +} + +func (m *ListDevicesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListDevicesResponse.Unmarshal(m, b) +} +func (m *ListDevicesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListDevicesResponse.Marshal(b, m, deterministic) +} +func (m *ListDevicesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListDevicesResponse.Merge(m, src) +} +func (m *ListDevicesResponse) XXX_Size() int { + return xxx_messageInfo_ListDevicesResponse.Size(m) +} +func (m *ListDevicesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListDevicesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListDevicesResponse proto.InternalMessageInfo + +func (m *ListDevicesResponse) GetLocalDevice() []*framework.DeviceAttributes { + if m != nil { + return m.LocalDevice + } + return nil +} + +func (m *ListDevicesResponse) GetRemoteDevice() []*framework.DeviceAttributes { + if m != nil { + return m.RemoteDevice + } + return nil +} + +type MakeCallableRequest struct { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. + SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // Options that define the behavior of the created callable. + Options *CallableOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + // Unique identifier for this request. Every MakeCallableRequest must + // have a unique request_id, and retried MakeCallableRequest must have + // the same request_id. If request_id is zero, retry detection is disabled. 
+ RequestId int64 `protobuf:"varint,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MakeCallableRequest) Reset() { *m = MakeCallableRequest{} } +func (m *MakeCallableRequest) String() string { return proto.CompactTextString(m) } +func (*MakeCallableRequest) ProtoMessage() {} +func (*MakeCallableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{14} +} + +func (m *MakeCallableRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MakeCallableRequest.Unmarshal(m, b) +} +func (m *MakeCallableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MakeCallableRequest.Marshal(b, m, deterministic) +} +func (m *MakeCallableRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MakeCallableRequest.Merge(m, src) +} +func (m *MakeCallableRequest) XXX_Size() int { + return xxx_messageInfo_MakeCallableRequest.Size(m) +} +func (m *MakeCallableRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MakeCallableRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MakeCallableRequest proto.InternalMessageInfo + +func (m *MakeCallableRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +func (m *MakeCallableRequest) GetOptions() *CallableOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MakeCallableRequest) GetRequestId() int64 { + if m != nil { + return m.RequestId + } + return 0 +} + +type MakeCallableResponse struct { + // A handle to the created callable. 
+ Handle int64 `protobuf:"varint,1,opt,name=handle,proto3" json:"handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MakeCallableResponse) Reset() { *m = MakeCallableResponse{} } +func (m *MakeCallableResponse) String() string { return proto.CompactTextString(m) } +func (*MakeCallableResponse) ProtoMessage() {} +func (*MakeCallableResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{15} +} + +func (m *MakeCallableResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MakeCallableResponse.Unmarshal(m, b) +} +func (m *MakeCallableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MakeCallableResponse.Marshal(b, m, deterministic) +} +func (m *MakeCallableResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MakeCallableResponse.Merge(m, src) +} +func (m *MakeCallableResponse) XXX_Size() int { + return xxx_messageInfo_MakeCallableResponse.Size(m) +} +func (m *MakeCallableResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MakeCallableResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MakeCallableResponse proto.InternalMessageInfo + +func (m *MakeCallableResponse) GetHandle() int64 { + if m != nil { + return m.Handle + } + return 0 +} + +type RunCallableRequest struct { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. + SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // REQUIRED: handle must be returned by a MakeCallable call to the same + // master service. + Handle int64 `protobuf:"varint,2,opt,name=handle,proto3" json:"handle,omitempty"` + // Values of the tensors passed as arguments to the callable, in the order + // defined in the CallableOptions.feed field passed to MakeCallable. 
+ Feed []*framework.TensorProto `protobuf:"bytes,3,rep,name=feed,proto3" json:"feed,omitempty"` + // Unique identifier for this request. Every RunCallableRequest must + // have a unique request_id, and retried RunCallableRequest must have + // the same request_id. If request_id is zero, retry detection is disabled. + RequestId int64 `protobuf:"varint,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunCallableRequest) Reset() { *m = RunCallableRequest{} } +func (m *RunCallableRequest) String() string { return proto.CompactTextString(m) } +func (*RunCallableRequest) ProtoMessage() {} +func (*RunCallableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{16} +} + +func (m *RunCallableRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunCallableRequest.Unmarshal(m, b) +} +func (m *RunCallableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunCallableRequest.Marshal(b, m, deterministic) +} +func (m *RunCallableRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunCallableRequest.Merge(m, src) +} +func (m *RunCallableRequest) XXX_Size() int { + return xxx_messageInfo_RunCallableRequest.Size(m) +} +func (m *RunCallableRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RunCallableRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RunCallableRequest proto.InternalMessageInfo + +func (m *RunCallableRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +func (m *RunCallableRequest) GetHandle() int64 { + if m != nil { + return m.Handle + } + return 0 +} + +func (m *RunCallableRequest) GetFeed() []*framework.TensorProto { + if m != nil { + return m.Feed + } + return nil +} + +func (m *RunCallableRequest) GetRequestId() int64 { + if m != nil { + return m.RequestId + } + return 0 
+} + +type RunCallableResponse struct { + // Values of the tensors returned by the callable, in the order defined in the + // CallableOptions.fetch field passed to MakeCallable. + Fetch []*framework.TensorProto `protobuf:"bytes,1,rep,name=fetch,proto3" json:"fetch,omitempty"` + // Returned metadata if requested in the options. + Metadata *RunMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunCallableResponse) Reset() { *m = RunCallableResponse{} } +func (m *RunCallableResponse) String() string { return proto.CompactTextString(m) } +func (*RunCallableResponse) ProtoMessage() {} +func (*RunCallableResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{17} +} + +func (m *RunCallableResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunCallableResponse.Unmarshal(m, b) +} +func (m *RunCallableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunCallableResponse.Marshal(b, m, deterministic) +} +func (m *RunCallableResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunCallableResponse.Merge(m, src) +} +func (m *RunCallableResponse) XXX_Size() int { + return xxx_messageInfo_RunCallableResponse.Size(m) +} +func (m *RunCallableResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RunCallableResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RunCallableResponse proto.InternalMessageInfo + +func (m *RunCallableResponse) GetFetch() []*framework.TensorProto { + if m != nil { + return m.Fetch + } + return nil +} + +func (m *RunCallableResponse) GetMetadata() *RunMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +type ReleaseCallableRequest struct { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. 
+ SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // REQUIRED: handle must be returned by a MakeCallable call to the same + // master service. + Handle int64 `protobuf:"varint,2,opt,name=handle,proto3" json:"handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReleaseCallableRequest) Reset() { *m = ReleaseCallableRequest{} } +func (m *ReleaseCallableRequest) String() string { return proto.CompactTextString(m) } +func (*ReleaseCallableRequest) ProtoMessage() {} +func (*ReleaseCallableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{18} +} + +func (m *ReleaseCallableRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReleaseCallableRequest.Unmarshal(m, b) +} +func (m *ReleaseCallableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReleaseCallableRequest.Marshal(b, m, deterministic) +} +func (m *ReleaseCallableRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReleaseCallableRequest.Merge(m, src) +} +func (m *ReleaseCallableRequest) XXX_Size() int { + return xxx_messageInfo_ReleaseCallableRequest.Size(m) +} +func (m *ReleaseCallableRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReleaseCallableRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReleaseCallableRequest proto.InternalMessageInfo + +func (m *ReleaseCallableRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +func (m *ReleaseCallableRequest) GetHandle() int64 { + if m != nil { + return m.Handle + } + return 0 +} + +type ReleaseCallableResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReleaseCallableResponse) Reset() { *m = ReleaseCallableResponse{} } +func (m *ReleaseCallableResponse) 
String() string { return proto.CompactTextString(m) } +func (*ReleaseCallableResponse) ProtoMessage() {} +func (*ReleaseCallableResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5171b2a5dcde72cd, []int{19} +} + +func (m *ReleaseCallableResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReleaseCallableResponse.Unmarshal(m, b) +} +func (m *ReleaseCallableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReleaseCallableResponse.Marshal(b, m, deterministic) +} +func (m *ReleaseCallableResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReleaseCallableResponse.Merge(m, src) +} +func (m *ReleaseCallableResponse) XXX_Size() int { + return xxx_messageInfo_ReleaseCallableResponse.Size(m) +} +func (m *ReleaseCallableResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReleaseCallableResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReleaseCallableResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CreateSessionRequest)(nil), "tensorflow.CreateSessionRequest") + proto.RegisterType((*CreateSessionResponse)(nil), "tensorflow.CreateSessionResponse") + proto.RegisterType((*ExtendSessionRequest)(nil), "tensorflow.ExtendSessionRequest") + proto.RegisterType((*ExtendSessionResponse)(nil), "tensorflow.ExtendSessionResponse") + proto.RegisterType((*RunStepRequest)(nil), "tensorflow.RunStepRequest") + proto.RegisterType((*RunStepResponse)(nil), "tensorflow.RunStepResponse") + proto.RegisterType((*PartialRunSetupRequest)(nil), "tensorflow.PartialRunSetupRequest") + proto.RegisterType((*PartialRunSetupResponse)(nil), "tensorflow.PartialRunSetupResponse") + proto.RegisterType((*CloseSessionRequest)(nil), "tensorflow.CloseSessionRequest") + proto.RegisterType((*CloseSessionResponse)(nil), "tensorflow.CloseSessionResponse") + proto.RegisterType((*ResetRequest)(nil), "tensorflow.ResetRequest") + proto.RegisterType((*ResetResponse)(nil), "tensorflow.ResetResponse") + 
proto.RegisterType((*ListDevicesRequest)(nil), "tensorflow.ListDevicesRequest") + proto.RegisterType((*ListDevicesResponse)(nil), "tensorflow.ListDevicesResponse") + proto.RegisterType((*MakeCallableRequest)(nil), "tensorflow.MakeCallableRequest") + proto.RegisterType((*MakeCallableResponse)(nil), "tensorflow.MakeCallableResponse") + proto.RegisterType((*RunCallableRequest)(nil), "tensorflow.RunCallableRequest") + proto.RegisterType((*RunCallableResponse)(nil), "tensorflow.RunCallableResponse") + proto.RegisterType((*ReleaseCallableRequest)(nil), "tensorflow.ReleaseCallableRequest") + proto.RegisterType((*ReleaseCallableResponse)(nil), "tensorflow.ReleaseCallableResponse") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/master.proto", fileDescriptor_5171b2a5dcde72cd) +} + +var fileDescriptor_5171b2a5dcde72cd = []byte{ + // 954 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x51, 0x6f, 0x23, 0xb5, + 0x13, 0xd7, 0x26, 0x6d, 0xae, 0x99, 0x36, 0xed, 0xff, 0xef, 0xa4, 0xe9, 0x5e, 0xb9, 0x93, 0xa2, + 0x45, 0x45, 0xd1, 0x1d, 0x24, 0xbd, 0x1e, 0x88, 0x07, 0x4e, 0x82, 0xbb, 0xf4, 0x28, 0x27, 0x51, + 0xa8, 0x5c, 0x04, 0x12, 0x2f, 0x2b, 0x67, 0x77, 0x92, 0xae, 0x6e, 0x63, 0x07, 0xdb, 0x7b, 0xa5, + 0x5f, 0x83, 0x37, 0x9e, 0xe0, 0x81, 0x0f, 0xc5, 0x37, 0xe0, 0x2b, 0xf0, 0x88, 0x62, 0x3b, 0xc9, + 0xee, 0x36, 0x45, 0x6d, 0xc5, 0xdb, 0x7a, 0xfc, 0x9b, 0x99, 0xdf, 0xfc, 0x3c, 0x1e, 0x2f, 0x1c, + 0x68, 0xe4, 0x4a, 0xc8, 0x51, 0x2a, 0x2e, 0xfb, 0x91, 0x90, 0xd8, 0x9f, 0x4a, 0xa1, 0xc5, 0x30, + 0x1b, 0xf5, 0x27, 0x4c, 0x69, 0x94, 0x3d, 0xb3, 0x26, 0xb0, 0x84, 0xed, 0x3f, 0x2b, 0xbb, 0x8c, + 0x24, 0x9b, 0xe0, 0xa5, 0x90, 0x6f, 0xfb, 0x31, 0xbe, 0x4b, 0x22, 0x0c, 0x99, 0xd6, 0x32, 0x19, + 0x66, 0x1a, 0x95, 0x75, 0xdf, 0x3f, 0xb8, 0xd9, 0x65, 0x2c, 0xd9, 0xf4, 0xc2, 0xc1, 0x3e, 0xb8, + 0x19, 0x66, 0x77, 0x1c, 0xee, 0x49, 0x19, 0x97, 0x26, 0x43, 0xfb, 0x81, 0x52, 0x0a, 0x19, 0x46, + 0x22, 0xbe, 0x39, 
0xf5, 0xa2, 0xc0, 0x48, 0xf0, 0x51, 0x32, 0x76, 0xb0, 0xa7, 0x37, 0xc2, 0x38, + 0x9b, 0x60, 0x1c, 0xe6, 0xf3, 0x07, 0xbf, 0x78, 0xd0, 0x1a, 0x48, 0x64, 0x1a, 0xcf, 0x51, 0xa9, + 0x44, 0x70, 0x8a, 0x3f, 0x65, 0xa8, 0x34, 0x79, 0x06, 0x75, 0x53, 0x4f, 0x18, 0xe3, 0xc8, 0xf7, + 0x3a, 0x5e, 0x77, 0xf3, 0xa8, 0xd5, 0x5b, 0x46, 0xee, 0x9d, 0xcc, 0x36, 0x8f, 0x71, 0x44, 0x37, + 0xc6, 0xee, 0x8b, 0xf4, 0xa1, 0x66, 0x89, 0xf8, 0x15, 0x83, 0xdf, 0xcb, 0xe3, 0x07, 0x66, 0xe7, + 0x6c, 0x96, 0x94, 0x3a, 0x18, 0x69, 0x43, 0x4d, 0x33, 0x39, 0x46, 0xed, 0x57, 0x3b, 0x5e, 0xb7, + 0x4e, 0xdd, 0x2a, 0x88, 0x60, 0xb7, 0xc4, 0x49, 0x4d, 0x05, 0x57, 0x48, 0x0e, 0x60, 0x5b, 0x59, + 0x53, 0x78, 0xc1, 0x78, 0x9c, 0xa2, 0x61, 0x56, 0xa7, 0x0d, 0x67, 0xfd, 0xca, 0x18, 0xc9, 0xfb, + 0xd0, 0xb0, 0xdc, 0xdf, 0xa1, 0x9c, 0x99, 0x0d, 0x9f, 0x2a, 0xdd, 0x32, 0xc6, 0xef, 0xad, 0x2d, + 0xf8, 0xc3, 0x83, 0xd6, 0xeb, 0x9f, 0x35, 0xf2, 0xb8, 0x54, 0xf9, 0x2d, 0x93, 0x14, 0x04, 0xaa, + 0xdc, 0x4a, 0xa0, 0x23, 0xd8, 0x8d, 0x32, 0x29, 0x91, 0xeb, 0xb0, 0xc8, 0xaf, 0x6a, 0xf8, 0x35, + 0xdd, 0xe6, 0x49, 0x9e, 0xe6, 0x00, 0x76, 0x4b, 0x2c, 0x9d, 0x16, 0x4f, 0xe0, 0xff, 0x1c, 0x2f, + 0x4b, 0x81, 0xd6, 0x4c, 0xa0, 0x1d, 0x8e, 0x97, 0x85, 0x20, 0x7f, 0x56, 0x60, 0x9b, 0x66, 0xfc, + 0x5c, 0xe3, 0xf4, 0x8e, 0x55, 0x1e, 0xc2, 0xda, 0x08, 0x31, 0xf6, 0x2b, 0x9d, 0x6a, 0x77, 0xf3, + 0xe8, 0x51, 0xbe, 0xc0, 0x6f, 0x66, 0xdd, 0xf4, 0x9d, 0x59, 0xdb, 0x63, 0x35, 0x48, 0xd2, 0x82, + 0xf5, 0x11, 0xea, 0xe8, 0xc2, 0xaf, 0x76, 0xaa, 0xdd, 0x3a, 0xb5, 0x8b, 0xdc, 0x51, 0xaf, 0x19, + 0xb3, 0x5b, 0x91, 0x43, 0x78, 0x20, 0xa6, 0x3a, 0x11, 0x5c, 0xf9, 0xeb, 0x46, 0xc3, 0x76, 0x3e, + 0x05, 0xcd, 0xf8, 0xb7, 0x76, 0x97, 0xce, 0x61, 0xe4, 0x43, 0x20, 0x53, 0x26, 0x75, 0xc2, 0xd2, + 0x50, 0x66, 0x0b, 0xf2, 0x35, 0x43, 0xfe, 0x7f, 0x6e, 0x87, 0x66, 0x73, 0xfe, 0x5f, 0xc0, 0x63, + 0xa5, 0x85, 0xc4, 0xd0, 0x5c, 0x27, 0x15, 0x26, 0x3c, 0x94, 0x4e, 0xc1, 0x70, 0x28, 0xe2, 0x2b, + 0xff, 0x41, 0xc7, 0xeb, 0x6e, 0xd0, 0x87, 0x06, 0xf4, 
0xda, 0x60, 0xde, 0x2c, 0x34, 0x7e, 0x25, + 0xe2, 0x2b, 0xf2, 0x18, 0x40, 0x5a, 0xcd, 0xc2, 0x24, 0xf6, 0x37, 0x8c, 0xc0, 0x75, 0x67, 0x79, + 0x13, 0x07, 0x7f, 0x79, 0xb0, 0xb3, 0x90, 0xd6, 0x1d, 0xcd, 0xc7, 0x50, 0xb3, 0x45, 0xf8, 0xde, + 0x2d, 0x64, 0x73, 0x58, 0xf2, 0x1c, 0x36, 0x26, 0xa8, 0x59, 0xcc, 0x34, 0x5b, 0x75, 0x81, 0x68, + 0xc6, 0x4f, 0xdd, 0x36, 0x5d, 0x00, 0xc9, 0xa7, 0xb0, 0xa9, 0x34, 0xd3, 0x99, 0x32, 0x93, 0xc2, + 0x34, 0xd2, 0x76, 0x51, 0x43, 0x53, 0x78, 0x6f, 0x20, 0x62, 0xa4, 0x60, 0xa1, 0xb3, 0x6f, 0x72, + 0x08, 0x2d, 0xe7, 0x68, 0x07, 0xcd, 0x04, 0x95, 0x62, 0x63, 0x34, 0x1d, 0x54, 0xa7, 0xc4, 0xee, + 0x19, 0x41, 0x4e, 0xed, 0x4e, 0xf0, 0xbb, 0x07, 0xed, 0xb3, 0x85, 0xbe, 0xe7, 0xa8, 0xb3, 0xbb, + 0x36, 0x13, 0xc9, 0x35, 0x53, 0xfd, 0x5e, 0xed, 0x52, 0x3c, 0x8c, 0xf5, 0xf2, 0x61, 0x9c, 0xc0, + 0xde, 0x35, 0x86, 0xee, 0x4c, 0x56, 0xb7, 0x8d, 0xb7, 0xba, 0x6d, 0x82, 0x17, 0xd0, 0x1c, 0xa4, + 0x42, 0xe1, 0xbd, 0x46, 0x43, 0xd0, 0x86, 0x56, 0xd1, 0xdb, 0x72, 0x08, 0xce, 0x61, 0x8b, 0xa2, + 0x42, 0x3d, 0x0f, 0xf7, 0x08, 0xea, 0x91, 0xe0, 0x9a, 0x25, 0x1c, 0x6d, 0xab, 0xd4, 0xe9, 0xd2, + 0x30, 0x4b, 0xe6, 0x1e, 0xa1, 0x51, 0x92, 0x6a, 0x94, 0xca, 0xe9, 0xd6, 0xb0, 0xd6, 0x2f, 0xad, + 0x31, 0xd8, 0x81, 0x86, 0x0b, 0xea, 0xb2, 0x7c, 0x06, 0xe4, 0xeb, 0x44, 0xe9, 0x63, 0x83, 0x52, + 0x77, 0xa4, 0xfe, 0xab, 0x07, 0xcd, 0x82, 0xb7, 0x93, 0xef, 0x73, 0xd8, 0x4a, 0x45, 0xc4, 0xd2, + 0xd0, 0x26, 0x5f, 0xd5, 0xd8, 0xd6, 0xe5, 0xe5, 0xe2, 0xc1, 0xa4, 0x9b, 0xc6, 0xc3, 0x9a, 0xc9, + 0x4b, 0x68, 0x48, 0x9c, 0x08, 0x8d, 0xf3, 0x08, 0x95, 0x5b, 0x44, 0xd8, 0xb2, 0x2e, 0xd6, 0x3e, + 0x7b, 0xab, 0x9a, 0xa7, 0xec, 0x2d, 0x0e, 0x58, 0x9a, 0xb2, 0x61, 0x8a, 0x77, 0xec, 0xbe, 0x4f, + 0x96, 0xa3, 0xc6, 0x5e, 0xaf, 0xf7, 0x0a, 0xef, 0x93, 0x0b, 0x7a, 0x6d, 0xde, 0x14, 0x5b, 0xae, + 0x5a, 0x6e, 0xb9, 0x1e, 0xb4, 0x8a, 0x9c, 0x9c, 0x60, 0x6d, 0xa8, 0xe5, 0xc8, 0x54, 0xa9, 0x5b, + 0x05, 0xbf, 0x79, 0x40, 0x68, 0xc6, 0xef, 0x59, 0xc3, 0x32, 0x6a, 0x25, 0x1f, 0x95, 0x3c, 
0x75, + 0x37, 0xab, 0x6a, 0x44, 0x2d, 0xcc, 0x8d, 0xeb, 0x13, 0xba, 0x58, 0xd1, 0x5a, 0xb9, 0xa2, 0x2b, + 0x68, 0x16, 0x08, 0xba, 0x82, 0x3e, 0x9a, 0x5f, 0x54, 0xef, 0xdf, 0x73, 0xb8, 0x1b, 0x7c, 0x9f, + 0x69, 0x16, 0xfc, 0x00, 0x6d, 0x8a, 0x29, 0x32, 0x85, 0xff, 0xad, 0x3e, 0xc1, 0x43, 0xd8, 0xbb, + 0x16, 0xd8, 0xcd, 0xf8, 0x2b, 0xd8, 0x17, 0x72, 0x9c, 0xe7, 0x16, 0x27, 0x4a, 0xcb, 0x8c, 0xeb, + 0x64, 0x82, 0xaf, 0xfc, 0xe3, 0xd9, 0xc2, 0xb4, 0x63, 0x4c, 0xad, 0xcd, 0x94, 0xa9, 0xce, 0xbc, + 0x1f, 0x5f, 0x8c, 0x13, 0x7d, 0x91, 0x0d, 0x7b, 0x91, 0x98, 0xf4, 0x73, 0xff, 0x5c, 0xab, 0x3f, + 0xc7, 0xa2, 0xf8, 0x33, 0xf6, 0xb7, 0xe7, 0x0d, 0x6b, 0x66, 0xf1, 0xfc, 0x9f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x38, 0x2f, 0xfa, 0xa4, 0xba, 0x0a, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/master.proto b/executor/proto/tensorflow/core/protobuf/master.proto new file mode 100644 index 0000000000..9addf67908 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/master.proto @@ -0,0 +1,353 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; +option java_outer_classname = "DistributedRuntimeProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.distruntime"; + +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +import "tensorflow/core/framework/device_attributes.proto"; +import "tensorflow/core/framework/graph.proto"; +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/lib/core/error_codes.proto"; +import "tensorflow/core/protobuf/config.proto"; +import "tensorflow/core/protobuf/named_tensor.proto"; + +//////////////////////////////////////////////////////////////////////////////// +// +// CreateSession method request/response protos. +// +//////////////////////////////////////////////////////////////////////////////// + +message CreateSessionRequest { + // The initial graph definition. + GraphDef graph_def = 1; + + // Configuration options. + ConfigProto config = 2; + + // The target string used from the client's perspective. + string target = 3; +} + +message CreateSessionResponse { + // The session handle to be used in subsequent calls for the created session. + // + // The client must arrange to call CloseSession with this returned + // session handle to close the session. + string session_handle = 1; + + // The initial version number for the graph, to be used in the next call + // to ExtendSession. + int64 graph_version = 2; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// ExtendSession method request/response protos. +// +// The "graph_def" specifies a set of nodes to be added to the session's graph. +// +// A typical "graph_def" will contain: +// +// * Zero or more new nodes with names that do not exist in the server-side +// graph. These will be added to the graph. 
+// +// PRECONDITION: The server-side current version is req.current_version. +// None of the names in req.graph_def appeared in previous successful calls to +// CreateSession or ExtendSession with the same session_handle. +// POSTCONDITION: The server-side current version is resp.new_version. +// +//////////////////////////////////////////////////////////////////////////////// + +message ExtendSessionRequest { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. + string session_handle = 1; + + // REQUIRED: The nodes to be added to the session's graph. If any node has + // the same name as an existing node, the operation will fail with + // ILLEGAL_ARGUMENT. + GraphDef graph_def = 2; + + // REQUIRED: The version number of the graph to be extended. This will be + // tested against the current server-side version number, and the operation + // will fail with FAILED_PRECONDITION if they do not match. + int64 current_graph_version = 3; +} + +message ExtendSessionResponse { + // TODO(mrry): Return something about the operation? + + // The new version number for the extended graph, to be used in the next call + // to ExtendSession. + int64 new_graph_version = 4; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// RunStep method request/response protos. +// +// The caller should provide the feeds needed by the graph and specify +// what nodes should be fetched. +// +//////////////////////////////////////////////////////////////////////////////// + +message RunStepRequest { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. + string session_handle = 1; + + // Tensors to be fed in the step. Each feed is a named tensor. + repeated NamedTensorProto feed = 2; + + // Fetches. A list of tensor names. The caller expects a tensor to + // be returned for each fetch[i] (see RunStepResponse.tensor). 
The + // order of specified fetches does not change the execution order. + repeated string fetch = 3; + + // Target Nodes. A list of node names. The named nodes will be run + // to but their outputs will not be fetched. + repeated string target = 4; + + // Options for the run call. + RunOptions options = 5; + + // Partial run handle (optional). If specified, this will be a partial run + // execution, run up to the specified fetches. + string partial_run_handle = 6; + + // If true then some errors, e.g., execution errors that have long + // error messages, may return an OK RunStepResponse with the actual + // error saved in the status_code/status_error_message fields of the + // response body. This is a workaround since the RPC subsystem may + // truncate long metadata messages. + bool store_errors_in_response_body = 7; + + // Unique identifier for this request. Every RunStepRequest must + // have a unique request_id, and retried RunStepRequest must have + // the same request_id. If request_id is zero, retry detection is disabled. + int64 request_id = 8; +} + +message RunStepResponse { + // NOTE: The order of the returned tensors may or may not match + // the fetch order specified in RunStepRequest. + repeated NamedTensorProto tensor = 1; + + // Returned metadata if requested in the options. + RunMetadata metadata = 2; + + // If store_errors_in_response_body is true in the request, then + // optionally the server may return an OK status for the RPC and + // fill the true status into the fields below, to allow for messages + // that are too long to fit in metadata. + error.Code status_code = 3; + string status_error_message = 4; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// PartialRunSetup method request/response protos. +// +// The caller should provide the future partial run feeds, fetches, and targets. +// Then the caller can use RunStepRequest with is_partial set to make partial +// run calls. 
+// +//////////////////////////////////////////////////////////////////////////////// + +message PartialRunSetupRequest { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. + string session_handle = 1; + + // Tensors to be fed in future steps. + repeated string feed = 2; + + // Fetches. A list of tensor names. The caller expects a tensor to be returned + // for each fetch[i] (see RunStepResponse.tensor), for corresponding partial + // RunStepRequests. The order of specified fetches does not change the + // execution order. + repeated string fetch = 3; + + // Target Nodes. A list of node names. The named nodes will be run in future + // steps, but their outputs will not be fetched. + repeated string target = 4; + + // Unique identifier for this request. Every PartialRunSetupRequest must + // have a unique request_id, and retried PartialRunSetupRequest must have + // the same request_id. If request_id is zero, retry detection is disabled. + int64 request_id = 5; +} + +message PartialRunSetupResponse { + // The unique handle corresponding to the ongoing partial run call setup by + // the invocation to PartialRunSetup. This handle may be passed to + // RunStepRequest to send and receive tensors for this partial run. + string partial_run_handle = 1; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// CloseSession method request/response protos. +// +//////////////////////////////////////////////////////////////////////////////// + +message CloseSessionRequest { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. + string session_handle = 1; +} + +message CloseSessionResponse {} + +// Reset() allows misbehaving or slow sessions to be aborted and closed, and +// causes their resources eventually to be released. 
Reset() does not wait +// for the computations in old sessions to cease; it merely starts the +// process of tearing them down. However, if a new session is started after +// a Reset(), the new session is isolated from changes that old sessions +// (started prior to the Reset()) may continue to make to resources, provided +// all those resources are in containers listed in "containers". +// +// Old sessions may continue to have side-effects on resources not in +// containers listed in "containers", and thus may affect future +// sessions' results in ways that are hard to predict. Thus, if well-defined +// behavior is desired, is it recommended that all containers be listed in +// "containers". Similarly, if a device_filter is specified, results may be +// hard to predict. +message ResetRequest { + // A list of container names, which may be empty. + // + // If 'container' is not empty, releases resources in the given + // containers in all devices. + // + // If 'container' is empty, releases resources in the default + // container in all devices. + repeated string container = 1; + + // When any filters are present, only devices that match the filters + // will be reset. Each filter can be partially specified, + // e.g. "/job:ps" "/job:worker/replica:3", etc. + repeated string device_filters = 2; +} + +message ResetResponse {} + +//////////////////////////////////////////////////////////////////////////////// +// +// ListDevices method request/response protos. +// +// Returns information about the TensorFlow devices that are available +// to this master. +// +//////////////////////////////////////////////////////////////////////////////// + +message ListDevicesRequest { + // Optional: session_handle must be returned by a CreateSession call to the + // same master service. + // + // When session_handle is empty, the ClusterSpec provided when the master was + // started is used to compute the available devices. 
If the session_handle is + // provided but not recognized, an error is returned. Finally, if a valid + // session_handle is provided, the cluster configuration for that session is + // used when computing the response. + string session_handle = 1; +} + +message ListDevicesResponse { + repeated DeviceAttributes local_device = 1; + repeated DeviceAttributes remote_device = 2; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// MakeCallable method request/response protos. +// +//////////////////////////////////////////////////////////////////////////////// + +message MakeCallableRequest { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. + string session_handle = 1; + + // Options that define the behavior of the created callable. + CallableOptions options = 2; + + // Unique identifier for this request. Every MakeCallableRequest must + // have a unique request_id, and retried MakeCallableRequest must have + // the same request_id. If request_id is zero, retry detection is disabled. + int64 request_id = 3; +} + +message MakeCallableResponse { + // A handle to the created callable. + int64 handle = 1; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// RunCallable method request/response protos. +// +//////////////////////////////////////////////////////////////////////////////// + +message RunCallableRequest { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. + string session_handle = 1; + // REQUIRED: handle must be returned by a MakeCallable call to the same + // master service. + int64 handle = 2; + + // Values of the tensors passed as arguments to the callable, in the order + // defined in the CallableOptions.feed field passed to MakeCallable. + repeated TensorProto feed = 3; + + // Unique identifier for this request. 
Every RunCallableRequest must + // have a unique request_id, and retried RunCallableRequest must have + // the same request_id. If request_id is zero, retry detection is disabled. + int64 request_id = 4; +} + +message RunCallableResponse { + // Values of the tensors returned by the callable, in the order defined in the + // CallableOptions.fetch field passed to MakeCallable. + repeated TensorProto fetch = 1; + + // Returned metadata if requested in the options. + RunMetadata metadata = 2; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// ReleaseCallable method request/response protos. +// +//////////////////////////////////////////////////////////////////////////////// + +message ReleaseCallableRequest { + // REQUIRED: session_handle must be returned by a CreateSession call + // to the same master service. + string session_handle = 1; + + // REQUIRED: handle must be returned by a MakeCallable call to the same + // master service. + int64 handle = 2; +} + +message ReleaseCallableResponse {} diff --git a/executor/proto/tensorflow/core/protobuf/master_service.pb.go b/executor/proto/tensorflow/core/protobuf/master_service.pb.go new file mode 100644 index 0000000000..adf5220342 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/master_service.pb.go @@ -0,0 +1,52 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/master_service.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/master_service.proto", fileDescriptor_aec3657ea3852a92) +} + +var fileDescriptor_aec3657ea3852a92 = []byte{ + // 368 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0x41, 0x4f, 0xea, 0x40, + 0x10, 0xc7, 0xf3, 0x0e, 0x8f, 0x97, 0xec, 0x7b, 0x84, 0xa4, 0xef, 0xa2, 0x35, 0x01, 0xc4, 0x78, + 0xb4, 0x4d, 0xf4, 0xca, 0x09, 0xf4, 0x26, 0x06, 0x8b, 0x27, 0x2e, 0x66, 0x5b, 0x86, 0xda, 0x58, + 0xba, 0x65, 0x67, 0x56, 0xfd, 0x6e, 0x7e, 0x39, 0xd3, 0xd2, 0xd2, 0xdd, 0xba, 0xe0, 0x6d, 0x33, + 0xbf, 0xff, 0xfc, 0xd2, 0xd9, 0xee, 0xb0, 0x2b, 0x82, 0x0c, 0x85, 0x5c, 0xa7, 0xe2, 0xdd, 0x8f, + 0x84, 0x04, 0x3f, 0x97, 0x82, 0x44, 0xa8, 0xd6, 0xfe, 0x86, 0x23, 0x81, 0x7c, 0x46, 0x90, 0x6f, + 0x49, 0x04, 0x5e, 0x59, 0x77, 0x7a, 0x4d, 0xdc, 0x8b, 0x65, 0x1e, 0xb9, 0x97, 0x3f, 0xf4, 0xef, + 0xfa, 0xae, 0x3f, 0x3b, 0xac, 0x3b, 0x2b, 0x0b, 0x8b, 0x9d, 0xcf, 0x79, 0x62, 0xdd, 0xa9, 0x04, + 0x4e, 0xb0, 0x00, 0xc4, 0x44, 0x64, 0xce, 0xd0, 0xd3, 0xdc, 0x06, 0x0a, 0x60, 0xab, 0x00, 0xc9, + 0x3d, 0x3f, 0x92, 0xc0, 0x5c, 0x64, 0x58, 0x5a, 0xef, 0x3e, 0x08, 0xb2, 0x95, 0xd5, 0x6a, 0x20, + 0xab, 0xb5, 0x95, 0xa8, 0xac, 0x4b, 0xd6, 0x9b, 0x73, 0x49, 0x09, 0x4f, 0x03, 0x95, 0x2d, 0x80, + 0x54, 0xee, 0x8c, 0xf4, 0xae, 0x16, 0xac, 0xcd, 0x17, 0x47, 0x33, 0x95, 0x7b, 0xc2, 0xfe, 0x14, + 0x35, 0x82, 0xdc, 0x71, 0xf5, 0x7c, 0x55, 0xac, 0x5d, 0x67, 0x56, 0x56, 0x39, 0x1e, 0xd9, 0xbf, + 0x69, 0x2a, 0x70, 0x7f, 0x95, 0x03, 0xe3, 0xa2, 0x34, 0x52, 0xdb, 0x86, 0x87, 0x03, 0x95, 0xf2, + 0x81, 0xfd, 0xbd, 0x4f, 0x90, 0x6e, 0xa1, 0xf8, 0x59, 0xe8, 0xf4, 0xf5, 0x06, 0x0d, 0xd4, 0xc2, + 0xc1, 0x41, 0x5e, 0xf9, 0xc6, 0xec, 0x77, 0x00, 0x08, 0xe4, 0x9c, 0x18, 0x83, 0x14, 0xa5, 0xda, + 0x71, 0x6a, 0x21, 0xcd, 0x80, 0x33, 0xfe, 0x0a, 0x53, 0x9e, 0xa6, 0x3c, 0x4c, 0xc1, 0x1c, 0x50, + 0x27, 0xd6, 0x01, 0xcd, 0x40, 0x33, 
0x60, 0xa0, 0xb2, 0xbd, 0xb1, 0xdf, 0xba, 0xdf, 0xb6, 0x70, + 0x70, 0x90, 0x37, 0x6f, 0x24, 0x80, 0x14, 0x38, 0x36, 0x5f, 0x39, 0x32, 0x07, 0x32, 0xa0, 0xf5, + 0x8d, 0x7c, 0xcb, 0xec, 0xdc, 0x93, 0x2d, 0x73, 0x85, 0x8c, 0xf5, 0xe4, 0x2a, 0x41, 0x92, 0x2a, + 0xa3, 0x64, 0x03, 0x93, 0xff, 0xc6, 0x62, 0xcd, 0x8b, 0x7d, 0xc3, 0xf9, 0xaf, 0xe5, 0x38, 0x4e, + 0xe8, 0x45, 0x85, 0x5e, 0x24, 0x36, 0xbe, 0xb6, 0xa4, 0xf6, 0x63, 0x2c, 0xcc, 0xed, 0x0d, 0x3b, + 0xe5, 0xe9, 0xe6, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x86, 0x09, 0x2b, 0x20, 0x04, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/master_service.proto b/executor/proto/tensorflow/core/protobuf/master_service.proto new file mode 100644 index 0000000000..ce0e4f6435 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/master_service.proto @@ -0,0 +1,119 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +syntax = "proto3"; + +package tensorflow.grpc; +option java_outer_classname = "MasterServiceProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.distruntime"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +import "tensorflow/core/protobuf/master.proto"; + +//////////////////////////////////////////////////////////////////////////////// +// +// MasterService defines a TensorFlow service with which a client can +// interact to execute a distributed TensorFlow computation. +// +// A master service keeps track of multiple "master sessions". Each +// session encapsulates a computation graph and its associated state, +// and typically corresponds to a single "client session" (e.g. a +// `tensorflow::Session` instance). +// +// A session is responsible for the following: +// * assigning each node to a device (locally or remotely) using a +// placement algorithm. This may make decisions based on collected +// statistics from the workers in the system (e.g., memory usage, +// bandwidth consumption, etc.) +// +// * inserting intermediate nodes and edges to support cross-device +// and cross-process data flows and resource management. +// +// * issuing commands to workers to execute the subgraphs associated +// with those workers. +// +// Typically, a client carries out an iterative computation +// (e.g. training) by invoking RPCs against the master in a +// client-side loop. The client first creates a client session that +// connects to a particular master (using gRPC for example). The +// master creates a corresponding master session that is hosted on +// the master and caches state between the client's invocations. +// +// After the session is established, the master returns an opaque +// handle to the client that can be used to associate the client and +// master sessions. 
+// +// The client may send an initial graph to the master in the +// CreateSession call, and add nodes to the graph using ExtendSession. +// +// The most frequent operation a master is "RunStep", which implements +// the `Session::Run()` API. It supports feeding in arguments, +// executing a dataflow computation, and fetching arguments. +// +// Finally, when the client no longer needs the session, it should +// close the session by invoking CloseSession, which allows the master +// to reclaim resources associated with the session. The master may +// implement a garbage collection scheme that closes sessions that +// have been inactive for some time. +// +// For example, the following pseudo-code illustrates how a client +// interacts with a master: +// +// stub = NewStub("/job:mnist/replica:0/task:0") +// {handle} = stub->CreateSession({graph_def}) +// do { +// stub->RunStep({handle, {feeds}, {fetches}}) +// // The client can evaluate a predicate locally, based on the +// // result of `fetches`, to determine whether to terminate. For +// // example, it might fetch the loss and evaluate whether it is less +// // than some threshold. +// } while (!should_stop({fetches})); +// stub->CloseSession({handle}) +// +//////////////////////////////////////////////////////////////////////////////// + +service MasterService { + // Creates a session. + rpc CreateSession(CreateSessionRequest) returns (CreateSessionResponse); + + // Extends a session. + rpc ExtendSession(ExtendSessionRequest) returns (ExtendSessionResponse); + + // Prepares future partial run calls. + rpc PartialRunSetup(PartialRunSetupRequest) returns (PartialRunSetupResponse); + + // Drives the graph computation. + rpc RunStep(RunStepRequest) returns (RunStepResponse); + + // Closes a session. + rpc CloseSession(CloseSessionRequest) returns (CloseSessionResponse); + + // List the devices usable by the master. 
+ rpc ListDevices(ListDevicesRequest) returns (ListDevicesResponse); + + // Close and abandon all existing sessions. Ongoing computations + // will no longer affect fresh ones via the resources in containers listed in + // the ResetRequest. See ResetRequest for more details. + rpc Reset(ResetRequest) returns (ResetResponse); + + // Registers a callable for execution with RunCallable. + rpc MakeCallable(MakeCallableRequest) returns (MakeCallableResponse); + + // Executes a callable registered with MakeCallable. + rpc RunCallable(RunCallableRequest) returns (RunCallableResponse); + + // Frees resources associated with a callable registered with MakeCallable. + rpc ReleaseCallable(ReleaseCallableRequest) returns (ReleaseCallableResponse); +} diff --git a/executor/proto/tensorflow/core/protobuf/meta_graph.pb.go b/executor/proto/tensorflow/core/protobuf/meta_graph.pb.go new file mode 100644 index 0000000000..2ecd6d31bc --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/meta_graph.pb.go @@ -0,0 +1,1151 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/meta_graph.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// NOTE: This protocol buffer is evolving, and will go through revisions in the +// coming months. 
+// +// Protocol buffer containing the following which are necessary to restart +// training, run inference. It can be used to serialize/de-serialize memory +// objects necessary for running computation in a graph when crossing the +// process boundary. It can be used for long term storage of graphs, +// cross-language execution of graphs, etc. +// MetaInfoDef +// GraphDef +// SaverDef +// CollectionDef +// TensorInfo +// SignatureDef +type MetaGraphDef struct { + MetaInfoDef *MetaGraphDef_MetaInfoDef `protobuf:"bytes,1,opt,name=meta_info_def,json=metaInfoDef,proto3" json:"meta_info_def,omitempty"` + // GraphDef. + GraphDef *framework.GraphDef `protobuf:"bytes,2,opt,name=graph_def,json=graphDef,proto3" json:"graph_def,omitempty"` + // SaverDef. + SaverDef *SaverDef `protobuf:"bytes,3,opt,name=saver_def,json=saverDef,proto3" json:"saver_def,omitempty"` + // collection_def: Map from collection name to collections. + // See CollectionDef section for details. + CollectionDef map[string]*CollectionDef `protobuf:"bytes,4,rep,name=collection_def,json=collectionDef,proto3" json:"collection_def,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // signature_def: Map from user supplied key for a signature to a single + // SignatureDef. + SignatureDef map[string]*SignatureDef `protobuf:"bytes,5,rep,name=signature_def,json=signatureDef,proto3" json:"signature_def,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Asset file def to be used with the defined graph. + AssetFileDef []*AssetFileDef `protobuf:"bytes,6,rep,name=asset_file_def,json=assetFileDef,proto3" json:"asset_file_def,omitempty"` + // Extra information about the structure of functions and stateful objects. 
+ ObjectGraphDef *SavedObjectGraph `protobuf:"bytes,7,opt,name=object_graph_def,json=objectGraphDef,proto3" json:"object_graph_def,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetaGraphDef) Reset() { *m = MetaGraphDef{} } +func (m *MetaGraphDef) String() string { return proto.CompactTextString(m) } +func (*MetaGraphDef) ProtoMessage() {} +func (*MetaGraphDef) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{0} +} + +func (m *MetaGraphDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetaGraphDef.Unmarshal(m, b) +} +func (m *MetaGraphDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetaGraphDef.Marshal(b, m, deterministic) +} +func (m *MetaGraphDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetaGraphDef.Merge(m, src) +} +func (m *MetaGraphDef) XXX_Size() int { + return xxx_messageInfo_MetaGraphDef.Size(m) +} +func (m *MetaGraphDef) XXX_DiscardUnknown() { + xxx_messageInfo_MetaGraphDef.DiscardUnknown(m) +} + +var xxx_messageInfo_MetaGraphDef proto.InternalMessageInfo + +func (m *MetaGraphDef) GetMetaInfoDef() *MetaGraphDef_MetaInfoDef { + if m != nil { + return m.MetaInfoDef + } + return nil +} + +func (m *MetaGraphDef) GetGraphDef() *framework.GraphDef { + if m != nil { + return m.GraphDef + } + return nil +} + +func (m *MetaGraphDef) GetSaverDef() *SaverDef { + if m != nil { + return m.SaverDef + } + return nil +} + +func (m *MetaGraphDef) GetCollectionDef() map[string]*CollectionDef { + if m != nil { + return m.CollectionDef + } + return nil +} + +func (m *MetaGraphDef) GetSignatureDef() map[string]*SignatureDef { + if m != nil { + return m.SignatureDef + } + return nil +} + +func (m *MetaGraphDef) GetAssetFileDef() []*AssetFileDef { + if m != nil { + return m.AssetFileDef + } + return nil +} + +func (m *MetaGraphDef) GetObjectGraphDef() *SavedObjectGraph { + if m != nil { + return 
m.ObjectGraphDef + } + return nil +} + +// Meta information regarding the graph to be exported. To be used by users +// of this protocol buffer to encode information regarding their meta graph. +type MetaGraphDef_MetaInfoDef struct { + // User specified Version string. Can be the name of the model and revision, + // steps this model has been trained to, etc. + MetaGraphVersion string `protobuf:"bytes,1,opt,name=meta_graph_version,json=metaGraphVersion,proto3" json:"meta_graph_version,omitempty"` + // A copy of the OpDefs used by the producer of this graph_def. + // Descriptions and Ops not used in graph_def are stripped out. + StrippedOpList *framework.OpList `protobuf:"bytes,2,opt,name=stripped_op_list,json=strippedOpList,proto3" json:"stripped_op_list,omitempty"` + // A serialized protobuf. Can be the time this meta graph is created, or + // modified, or name of the model. + AnyInfo *any.Any `protobuf:"bytes,3,opt,name=any_info,json=anyInfo,proto3" json:"any_info,omitempty"` + // User supplied tag(s) on the meta_graph and included graph_def. + // + // MetaGraphDefs should be tagged with their capabilities or use-cases. + // Examples: "train", "serve", "gpu", "tpu", etc. + // These tags enable loaders to access the MetaGraph(s) appropriate for a + // specific use-case or runtime environment. + Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"` + // The __version__ string of the tensorflow build used to write this graph. + // This will be populated by the framework, which will overwrite any user + // supplied value. + TensorflowVersion string `protobuf:"bytes,5,opt,name=tensorflow_version,json=tensorflowVersion,proto3" json:"tensorflow_version,omitempty"` + // The __git_version__ string of the tensorflow build used to write this + // graph. This will be populated by the framework, which will overwrite any + // user supplied value. 
+ TensorflowGitVersion string `protobuf:"bytes,6,opt,name=tensorflow_git_version,json=tensorflowGitVersion,proto3" json:"tensorflow_git_version,omitempty"` + // A flag to denote whether default-valued attrs have been stripped from + // the nodes in this graph_def. + StrippedDefaultAttrs bool `protobuf:"varint,7,opt,name=stripped_default_attrs,json=strippedDefaultAttrs,proto3" json:"stripped_default_attrs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetaGraphDef_MetaInfoDef) Reset() { *m = MetaGraphDef_MetaInfoDef{} } +func (m *MetaGraphDef_MetaInfoDef) String() string { return proto.CompactTextString(m) } +func (*MetaGraphDef_MetaInfoDef) ProtoMessage() {} +func (*MetaGraphDef_MetaInfoDef) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{0, 0} +} + +func (m *MetaGraphDef_MetaInfoDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetaGraphDef_MetaInfoDef.Unmarshal(m, b) +} +func (m *MetaGraphDef_MetaInfoDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetaGraphDef_MetaInfoDef.Marshal(b, m, deterministic) +} +func (m *MetaGraphDef_MetaInfoDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetaGraphDef_MetaInfoDef.Merge(m, src) +} +func (m *MetaGraphDef_MetaInfoDef) XXX_Size() int { + return xxx_messageInfo_MetaGraphDef_MetaInfoDef.Size(m) +} +func (m *MetaGraphDef_MetaInfoDef) XXX_DiscardUnknown() { + xxx_messageInfo_MetaGraphDef_MetaInfoDef.DiscardUnknown(m) +} + +var xxx_messageInfo_MetaGraphDef_MetaInfoDef proto.InternalMessageInfo + +func (m *MetaGraphDef_MetaInfoDef) GetMetaGraphVersion() string { + if m != nil { + return m.MetaGraphVersion + } + return "" +} + +func (m *MetaGraphDef_MetaInfoDef) GetStrippedOpList() *framework.OpList { + if m != nil { + return m.StrippedOpList + } + return nil +} + +func (m *MetaGraphDef_MetaInfoDef) GetAnyInfo() *any.Any { + if m != nil { + return 
m.AnyInfo + } + return nil +} + +func (m *MetaGraphDef_MetaInfoDef) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *MetaGraphDef_MetaInfoDef) GetTensorflowVersion() string { + if m != nil { + return m.TensorflowVersion + } + return "" +} + +func (m *MetaGraphDef_MetaInfoDef) GetTensorflowGitVersion() string { + if m != nil { + return m.TensorflowGitVersion + } + return "" +} + +func (m *MetaGraphDef_MetaInfoDef) GetStrippedDefaultAttrs() bool { + if m != nil { + return m.StrippedDefaultAttrs + } + return false +} + +// CollectionDef should cover most collections. +// To add a user-defined collection, do one of the following: +// 1. For simple data types, such as string, int, float: +// tf.add_to_collection("your_collection_name", your_simple_value) +// strings will be stored as bytes_list. +// +// 2. For Protobuf types, there are three ways to add them: +// 1) tf.add_to_collection("your_collection_name", +// your_proto.SerializeToString()) +// +// collection_def { +// key: "user_defined_bytes_collection" +// value { +// bytes_list { +// value: "queue_name: \"test_queue\"\n" +// } +// } +// } +// +// or +// +// 2) tf.add_to_collection("your_collection_name", str(your_proto)) +// +// collection_def { +// key: "user_defined_string_collection" +// value { +// bytes_list { +// value: "\n\ntest_queue" +// } +// } +// } +// +// or +// +// 3) any_buf = any_pb2.Any() +// tf.add_to_collection("your_collection_name", +// any_buf.Pack(your_proto)) +// +// collection_def { +// key: "user_defined_any_collection" +// value { +// any_list { +// value { +// type_url: "type.googleapis.com/tensorflow.QueueRunnerDef" +// value: "\n\ntest_queue" +// } +// } +// } +// } +// +// 3. 
For Python objects, implement to_proto() and from_proto(), and register +// them in the following manner: +// ops.register_proto_function("your_collection_name", +// proto_type, +// to_proto=YourPythonObject.to_proto, +// from_proto=YourPythonObject.from_proto) +// These functions will be invoked to serialize and de-serialize the +// collection. For example, +// ops.register_proto_function(ops.GraphKeys.GLOBAL_VARIABLES, +// proto_type=variable_pb2.VariableDef, +// to_proto=Variable.to_proto, +// from_proto=Variable.from_proto) +type CollectionDef struct { + // Types that are valid to be assigned to Kind: + // *CollectionDef_NodeList_ + // *CollectionDef_BytesList_ + // *CollectionDef_Int64List_ + // *CollectionDef_FloatList_ + // *CollectionDef_AnyList_ + Kind isCollectionDef_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CollectionDef) Reset() { *m = CollectionDef{} } +func (m *CollectionDef) String() string { return proto.CompactTextString(m) } +func (*CollectionDef) ProtoMessage() {} +func (*CollectionDef) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{1} +} + +func (m *CollectionDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CollectionDef.Unmarshal(m, b) +} +func (m *CollectionDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CollectionDef.Marshal(b, m, deterministic) +} +func (m *CollectionDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_CollectionDef.Merge(m, src) +} +func (m *CollectionDef) XXX_Size() int { + return xxx_messageInfo_CollectionDef.Size(m) +} +func (m *CollectionDef) XXX_DiscardUnknown() { + xxx_messageInfo_CollectionDef.DiscardUnknown(m) +} + +var xxx_messageInfo_CollectionDef proto.InternalMessageInfo + +type isCollectionDef_Kind interface { + isCollectionDef_Kind() +} + +type CollectionDef_NodeList_ struct { + NodeList *CollectionDef_NodeList 
`protobuf:"bytes,1,opt,name=node_list,json=nodeList,proto3,oneof"` +} + +type CollectionDef_BytesList_ struct { + BytesList *CollectionDef_BytesList `protobuf:"bytes,2,opt,name=bytes_list,json=bytesList,proto3,oneof"` +} + +type CollectionDef_Int64List_ struct { + Int64List *CollectionDef_Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,proto3,oneof"` +} + +type CollectionDef_FloatList_ struct { + FloatList *CollectionDef_FloatList `protobuf:"bytes,4,opt,name=float_list,json=floatList,proto3,oneof"` +} + +type CollectionDef_AnyList_ struct { + AnyList *CollectionDef_AnyList `protobuf:"bytes,5,opt,name=any_list,json=anyList,proto3,oneof"` +} + +func (*CollectionDef_NodeList_) isCollectionDef_Kind() {} + +func (*CollectionDef_BytesList_) isCollectionDef_Kind() {} + +func (*CollectionDef_Int64List_) isCollectionDef_Kind() {} + +func (*CollectionDef_FloatList_) isCollectionDef_Kind() {} + +func (*CollectionDef_AnyList_) isCollectionDef_Kind() {} + +func (m *CollectionDef) GetKind() isCollectionDef_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *CollectionDef) GetNodeList() *CollectionDef_NodeList { + if x, ok := m.GetKind().(*CollectionDef_NodeList_); ok { + return x.NodeList + } + return nil +} + +func (m *CollectionDef) GetBytesList() *CollectionDef_BytesList { + if x, ok := m.GetKind().(*CollectionDef_BytesList_); ok { + return x.BytesList + } + return nil +} + +func (m *CollectionDef) GetInt64List() *CollectionDef_Int64List { + if x, ok := m.GetKind().(*CollectionDef_Int64List_); ok { + return x.Int64List + } + return nil +} + +func (m *CollectionDef) GetFloatList() *CollectionDef_FloatList { + if x, ok := m.GetKind().(*CollectionDef_FloatList_); ok { + return x.FloatList + } + return nil +} + +func (m *CollectionDef) GetAnyList() *CollectionDef_AnyList { + if x, ok := m.GetKind().(*CollectionDef_AnyList_); ok { + return x.AnyList + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*CollectionDef) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*CollectionDef_NodeList_)(nil), + (*CollectionDef_BytesList_)(nil), + (*CollectionDef_Int64List_)(nil), + (*CollectionDef_FloatList_)(nil), + (*CollectionDef_AnyList_)(nil), + } +} + +// NodeList is used for collecting nodes in graph. For example +// collection_def { +// key: "summaries" +// value { +// node_list { +// value: "input_producer/ScalarSummary:0" +// value: "shuffle_batch/ScalarSummary:0" +// value: "ImageSummary:0" +// } +// } +type CollectionDef_NodeList struct { + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CollectionDef_NodeList) Reset() { *m = CollectionDef_NodeList{} } +func (m *CollectionDef_NodeList) String() string { return proto.CompactTextString(m) } +func (*CollectionDef_NodeList) ProtoMessage() {} +func (*CollectionDef_NodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{1, 0} +} + +func (m *CollectionDef_NodeList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CollectionDef_NodeList.Unmarshal(m, b) +} +func (m *CollectionDef_NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CollectionDef_NodeList.Marshal(b, m, deterministic) +} +func (m *CollectionDef_NodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CollectionDef_NodeList.Merge(m, src) +} +func (m *CollectionDef_NodeList) XXX_Size() int { + return xxx_messageInfo_CollectionDef_NodeList.Size(m) +} +func (m *CollectionDef_NodeList) XXX_DiscardUnknown() { + xxx_messageInfo_CollectionDef_NodeList.DiscardUnknown(m) +} + +var xxx_messageInfo_CollectionDef_NodeList proto.InternalMessageInfo + +func (m *CollectionDef_NodeList) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +// BytesList is used for collecting strings and 
serialized protobufs. For +// example: +// collection_def { +// key: "trainable_variables" +// value { +// bytes_list { +// value: "\n\017conv1/weights:0\022\024conv1/weights/Assign +// \032\024conv1/weights/read:0" +// value: "\n\016conv1/biases:0\022\023conv1/biases/Assign\032 +// \023conv1/biases/read:0" +// } +// } +// } +type CollectionDef_BytesList struct { + Value [][]byte `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CollectionDef_BytesList) Reset() { *m = CollectionDef_BytesList{} } +func (m *CollectionDef_BytesList) String() string { return proto.CompactTextString(m) } +func (*CollectionDef_BytesList) ProtoMessage() {} +func (*CollectionDef_BytesList) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{1, 1} +} + +func (m *CollectionDef_BytesList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CollectionDef_BytesList.Unmarshal(m, b) +} +func (m *CollectionDef_BytesList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CollectionDef_BytesList.Marshal(b, m, deterministic) +} +func (m *CollectionDef_BytesList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CollectionDef_BytesList.Merge(m, src) +} +func (m *CollectionDef_BytesList) XXX_Size() int { + return xxx_messageInfo_CollectionDef_BytesList.Size(m) +} +func (m *CollectionDef_BytesList) XXX_DiscardUnknown() { + xxx_messageInfo_CollectionDef_BytesList.DiscardUnknown(m) +} + +var xxx_messageInfo_CollectionDef_BytesList proto.InternalMessageInfo + +func (m *CollectionDef_BytesList) GetValue() [][]byte { + if m != nil { + return m.Value + } + return nil +} + +// Int64List is used for collecting int, int64 and long values. 
+type CollectionDef_Int64List struct { + Value []int64 `protobuf:"varint,1,rep,packed,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CollectionDef_Int64List) Reset() { *m = CollectionDef_Int64List{} } +func (m *CollectionDef_Int64List) String() string { return proto.CompactTextString(m) } +func (*CollectionDef_Int64List) ProtoMessage() {} +func (*CollectionDef_Int64List) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{1, 2} +} + +func (m *CollectionDef_Int64List) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CollectionDef_Int64List.Unmarshal(m, b) +} +func (m *CollectionDef_Int64List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CollectionDef_Int64List.Marshal(b, m, deterministic) +} +func (m *CollectionDef_Int64List) XXX_Merge(src proto.Message) { + xxx_messageInfo_CollectionDef_Int64List.Merge(m, src) +} +func (m *CollectionDef_Int64List) XXX_Size() int { + return xxx_messageInfo_CollectionDef_Int64List.Size(m) +} +func (m *CollectionDef_Int64List) XXX_DiscardUnknown() { + xxx_messageInfo_CollectionDef_Int64List.DiscardUnknown(m) +} + +var xxx_messageInfo_CollectionDef_Int64List proto.InternalMessageInfo + +func (m *CollectionDef_Int64List) GetValue() []int64 { + if m != nil { + return m.Value + } + return nil +} + +// FloatList is used for collecting float values. 
+type CollectionDef_FloatList struct { + Value []float32 `protobuf:"fixed32,1,rep,packed,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CollectionDef_FloatList) Reset() { *m = CollectionDef_FloatList{} } +func (m *CollectionDef_FloatList) String() string { return proto.CompactTextString(m) } +func (*CollectionDef_FloatList) ProtoMessage() {} +func (*CollectionDef_FloatList) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{1, 3} +} + +func (m *CollectionDef_FloatList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CollectionDef_FloatList.Unmarshal(m, b) +} +func (m *CollectionDef_FloatList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CollectionDef_FloatList.Marshal(b, m, deterministic) +} +func (m *CollectionDef_FloatList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CollectionDef_FloatList.Merge(m, src) +} +func (m *CollectionDef_FloatList) XXX_Size() int { + return xxx_messageInfo_CollectionDef_FloatList.Size(m) +} +func (m *CollectionDef_FloatList) XXX_DiscardUnknown() { + xxx_messageInfo_CollectionDef_FloatList.DiscardUnknown(m) +} + +var xxx_messageInfo_CollectionDef_FloatList proto.InternalMessageInfo + +func (m *CollectionDef_FloatList) GetValue() []float32 { + if m != nil { + return m.Value + } + return nil +} + +// AnyList is used for collecting Any protos. 
+type CollectionDef_AnyList struct { + Value []*any.Any `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CollectionDef_AnyList) Reset() { *m = CollectionDef_AnyList{} } +func (m *CollectionDef_AnyList) String() string { return proto.CompactTextString(m) } +func (*CollectionDef_AnyList) ProtoMessage() {} +func (*CollectionDef_AnyList) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{1, 4} +} + +func (m *CollectionDef_AnyList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CollectionDef_AnyList.Unmarshal(m, b) +} +func (m *CollectionDef_AnyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CollectionDef_AnyList.Marshal(b, m, deterministic) +} +func (m *CollectionDef_AnyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CollectionDef_AnyList.Merge(m, src) +} +func (m *CollectionDef_AnyList) XXX_Size() int { + return xxx_messageInfo_CollectionDef_AnyList.Size(m) +} +func (m *CollectionDef_AnyList) XXX_DiscardUnknown() { + xxx_messageInfo_CollectionDef_AnyList.DiscardUnknown(m) +} + +var xxx_messageInfo_CollectionDef_AnyList proto.InternalMessageInfo + +func (m *CollectionDef_AnyList) GetValue() []*any.Any { + if m != nil { + return m.Value + } + return nil +} + +// Information about a Tensor necessary for feeding or retrieval. +type TensorInfo struct { + // Types that are valid to be assigned to Encoding: + // *TensorInfo_Name + // *TensorInfo_CooSparse_ + // *TensorInfo_CompositeTensor_ + Encoding isTensorInfo_Encoding `protobuf_oneof:"encoding"` + Dtype framework.DataType `protobuf:"varint,2,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + // The static shape should be recorded here, to the extent that it can + // be known in advance. 
In the case of a SparseTensor, this field describes + // the logical shape of the represented tensor (aka dense_shape). + TensorShape *framework.TensorShapeProto `protobuf:"bytes,3,opt,name=tensor_shape,json=tensorShape,proto3" json:"tensor_shape,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorInfo) Reset() { *m = TensorInfo{} } +func (m *TensorInfo) String() string { return proto.CompactTextString(m) } +func (*TensorInfo) ProtoMessage() {} +func (*TensorInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{2} +} + +func (m *TensorInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorInfo.Unmarshal(m, b) +} +func (m *TensorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorInfo.Marshal(b, m, deterministic) +} +func (m *TensorInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorInfo.Merge(m, src) +} +func (m *TensorInfo) XXX_Size() int { + return xxx_messageInfo_TensorInfo.Size(m) +} +func (m *TensorInfo) XXX_DiscardUnknown() { + xxx_messageInfo_TensorInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorInfo proto.InternalMessageInfo + +type isTensorInfo_Encoding interface { + isTensorInfo_Encoding() +} + +type TensorInfo_Name struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3,oneof"` +} + +type TensorInfo_CooSparse_ struct { + CooSparse *TensorInfo_CooSparse `protobuf:"bytes,4,opt,name=coo_sparse,json=cooSparse,proto3,oneof"` +} + +type TensorInfo_CompositeTensor_ struct { + CompositeTensor *TensorInfo_CompositeTensor `protobuf:"bytes,5,opt,name=composite_tensor,json=compositeTensor,proto3,oneof"` +} + +func (*TensorInfo_Name) isTensorInfo_Encoding() {} + +func (*TensorInfo_CooSparse_) isTensorInfo_Encoding() {} + +func (*TensorInfo_CompositeTensor_) isTensorInfo_Encoding() {} + +func (m *TensorInfo) GetEncoding() isTensorInfo_Encoding { + if m != nil { + 
return m.Encoding + } + return nil +} + +func (m *TensorInfo) GetName() string { + if x, ok := m.GetEncoding().(*TensorInfo_Name); ok { + return x.Name + } + return "" +} + +func (m *TensorInfo) GetCooSparse() *TensorInfo_CooSparse { + if x, ok := m.GetEncoding().(*TensorInfo_CooSparse_); ok { + return x.CooSparse + } + return nil +} + +func (m *TensorInfo) GetCompositeTensor() *TensorInfo_CompositeTensor { + if x, ok := m.GetEncoding().(*TensorInfo_CompositeTensor_); ok { + return x.CompositeTensor + } + return nil +} + +func (m *TensorInfo) GetDtype() framework.DataType { + if m != nil { + return m.Dtype + } + return framework.DataType_DT_INVALID +} + +func (m *TensorInfo) GetTensorShape() *framework.TensorShapeProto { + if m != nil { + return m.TensorShape + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*TensorInfo) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TensorInfo_Name)(nil), + (*TensorInfo_CooSparse_)(nil), + (*TensorInfo_CompositeTensor_)(nil), + } +} + +// For sparse tensors, The COO encoding stores a triple of values, indices, +// and shape. +type TensorInfo_CooSparse struct { + // The shape of the values Tensor is [?]. Its dtype must be the dtype of + // the SparseTensor as a whole, given in the enclosing TensorInfo. + ValuesTensorName string `protobuf:"bytes,1,opt,name=values_tensor_name,json=valuesTensorName,proto3" json:"values_tensor_name,omitempty"` + // The indices Tensor must have dtype int64 and shape [?, ?]. + IndicesTensorName string `protobuf:"bytes,2,opt,name=indices_tensor_name,json=indicesTensorName,proto3" json:"indices_tensor_name,omitempty"` + // The dynamic logical shape represented by the SparseTensor is recorded in + // the Tensor referenced here. It must have dtype int64 and shape [?]. 
+ DenseShapeTensorName string `protobuf:"bytes,3,opt,name=dense_shape_tensor_name,json=denseShapeTensorName,proto3" json:"dense_shape_tensor_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorInfo_CooSparse) Reset() { *m = TensorInfo_CooSparse{} } +func (m *TensorInfo_CooSparse) String() string { return proto.CompactTextString(m) } +func (*TensorInfo_CooSparse) ProtoMessage() {} +func (*TensorInfo_CooSparse) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{2, 0} +} + +func (m *TensorInfo_CooSparse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorInfo_CooSparse.Unmarshal(m, b) +} +func (m *TensorInfo_CooSparse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorInfo_CooSparse.Marshal(b, m, deterministic) +} +func (m *TensorInfo_CooSparse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorInfo_CooSparse.Merge(m, src) +} +func (m *TensorInfo_CooSparse) XXX_Size() int { + return xxx_messageInfo_TensorInfo_CooSparse.Size(m) +} +func (m *TensorInfo_CooSparse) XXX_DiscardUnknown() { + xxx_messageInfo_TensorInfo_CooSparse.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorInfo_CooSparse proto.InternalMessageInfo + +func (m *TensorInfo_CooSparse) GetValuesTensorName() string { + if m != nil { + return m.ValuesTensorName + } + return "" +} + +func (m *TensorInfo_CooSparse) GetIndicesTensorName() string { + if m != nil { + return m.IndicesTensorName + } + return "" +} + +func (m *TensorInfo_CooSparse) GetDenseShapeTensorName() string { + if m != nil { + return m.DenseShapeTensorName + } + return "" +} + +// Generic encoding for composite tensors. +type TensorInfo_CompositeTensor struct { + // The serialized TypeSpec for the composite tensor. 
+ TypeSpec *TypeSpecProto `protobuf:"bytes,1,opt,name=type_spec,json=typeSpec,proto3" json:"type_spec,omitempty"` + // A TensorInfo for each flattened component tensor. + Components []*TensorInfo `protobuf:"bytes,2,rep,name=components,proto3" json:"components,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorInfo_CompositeTensor) Reset() { *m = TensorInfo_CompositeTensor{} } +func (m *TensorInfo_CompositeTensor) String() string { return proto.CompactTextString(m) } +func (*TensorInfo_CompositeTensor) ProtoMessage() {} +func (*TensorInfo_CompositeTensor) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{2, 1} +} + +func (m *TensorInfo_CompositeTensor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorInfo_CompositeTensor.Unmarshal(m, b) +} +func (m *TensorInfo_CompositeTensor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorInfo_CompositeTensor.Marshal(b, m, deterministic) +} +func (m *TensorInfo_CompositeTensor) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorInfo_CompositeTensor.Merge(m, src) +} +func (m *TensorInfo_CompositeTensor) XXX_Size() int { + return xxx_messageInfo_TensorInfo_CompositeTensor.Size(m) +} +func (m *TensorInfo_CompositeTensor) XXX_DiscardUnknown() { + xxx_messageInfo_TensorInfo_CompositeTensor.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorInfo_CompositeTensor proto.InternalMessageInfo + +func (m *TensorInfo_CompositeTensor) GetTypeSpec() *TypeSpecProto { + if m != nil { + return m.TypeSpec + } + return nil +} + +func (m *TensorInfo_CompositeTensor) GetComponents() []*TensorInfo { + if m != nil { + return m.Components + } + return nil +} + +// SignatureDef defines the signature of a computation supported by a TensorFlow +// graph. 
+// +// For example, a model with two loss computations, sharing a single input, +// might have the following signature_def map. +// +// Note that across the two SignatureDefs "loss_A" and "loss_B", the input key, +// output key, and method_name are identical, and will be used by system(s) that +// implement or rely upon this particular loss method. The output tensor names +// differ, demonstrating how different outputs can exist for the same method. +// +// signature_def { +// key: "loss_A" +// value { +// inputs { +// key: "input" +// value { +// name: "input:0" +// dtype: DT_STRING +// tensor_shape: ... +// } +// } +// outputs { +// key: "loss_output" +// value { +// name: "loss_output_A:0" +// dtype: DT_FLOAT +// tensor_shape: ... +// } +// } +// } +// ... +// method_name: "some/package/compute_loss" +// } +// signature_def { +// key: "loss_B" +// value { +// inputs { +// key: "input" +// value { +// name: "input:0" +// dtype: DT_STRING +// tensor_shape: ... +// } +// } +// outputs { +// key: "loss_output" +// value { +// name: "loss_output_B:0" +// dtype: DT_FLOAT +// tensor_shape: ... +// } +// } +// } +// ... +// method_name: "some/package/compute_loss" +// } +type SignatureDef struct { + // Named input parameters. + Inputs map[string]*TensorInfo `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Named output parameters. + Outputs map[string]*TensorInfo `protobuf:"bytes,2,rep,name=outputs,proto3" json:"outputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Extensible method_name information enabling third-party users to mark a + // SignatureDef as supporting a particular method. This enables producers and + // consumers of SignatureDefs, e.g. a model definition library and a serving + // library to have a clear hand-off regarding the semantics of a computation. 
+ // + // Note that multiple SignatureDefs in a single MetaGraphDef may have the same + // method_name. This is commonly used to support multi-headed computation, + // where a single graph computation may return multiple results. + MethodName string `protobuf:"bytes,3,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignatureDef) Reset() { *m = SignatureDef{} } +func (m *SignatureDef) String() string { return proto.CompactTextString(m) } +func (*SignatureDef) ProtoMessage() {} +func (*SignatureDef) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{3} +} + +func (m *SignatureDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignatureDef.Unmarshal(m, b) +} +func (m *SignatureDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignatureDef.Marshal(b, m, deterministic) +} +func (m *SignatureDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureDef.Merge(m, src) +} +func (m *SignatureDef) XXX_Size() int { + return xxx_messageInfo_SignatureDef.Size(m) +} +func (m *SignatureDef) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureDef.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureDef proto.InternalMessageInfo + +func (m *SignatureDef) GetInputs() map[string]*TensorInfo { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *SignatureDef) GetOutputs() map[string]*TensorInfo { + if m != nil { + return m.Outputs + } + return nil +} + +func (m *SignatureDef) GetMethodName() string { + if m != nil { + return m.MethodName + } + return "" +} + +// An asset file def for a single file or a set of sharded files with the same +// name. +type AssetFileDef struct { + // The tensor to bind the asset filename to. 
+ TensorInfo *TensorInfo `protobuf:"bytes,1,opt,name=tensor_info,json=tensorInfo,proto3" json:"tensor_info,omitempty"` + // The filename within an assets directory. Note: does not include the path + // prefix, i.e. directories. For an asset at /tmp/path/vocab.txt, the filename + // would be "vocab.txt". + Filename string `protobuf:"bytes,2,opt,name=filename,proto3" json:"filename,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AssetFileDef) Reset() { *m = AssetFileDef{} } +func (m *AssetFileDef) String() string { return proto.CompactTextString(m) } +func (*AssetFileDef) ProtoMessage() {} +func (*AssetFileDef) Descriptor() ([]byte, []int) { + return fileDescriptor_e94adf32e895c059, []int{4} +} + +func (m *AssetFileDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AssetFileDef.Unmarshal(m, b) +} +func (m *AssetFileDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AssetFileDef.Marshal(b, m, deterministic) +} +func (m *AssetFileDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_AssetFileDef.Merge(m, src) +} +func (m *AssetFileDef) XXX_Size() int { + return xxx_messageInfo_AssetFileDef.Size(m) +} +func (m *AssetFileDef) XXX_DiscardUnknown() { + xxx_messageInfo_AssetFileDef.DiscardUnknown(m) +} + +var xxx_messageInfo_AssetFileDef proto.InternalMessageInfo + +func (m *AssetFileDef) GetTensorInfo() *TensorInfo { + if m != nil { + return m.TensorInfo + } + return nil +} + +func (m *AssetFileDef) GetFilename() string { + if m != nil { + return m.Filename + } + return "" +} + +func init() { + proto.RegisterType((*MetaGraphDef)(nil), "tensorflow.MetaGraphDef") + proto.RegisterMapType((map[string]*CollectionDef)(nil), "tensorflow.MetaGraphDef.CollectionDefEntry") + proto.RegisterMapType((map[string]*SignatureDef)(nil), "tensorflow.MetaGraphDef.SignatureDefEntry") + proto.RegisterType((*MetaGraphDef_MetaInfoDef)(nil), 
"tensorflow.MetaGraphDef.MetaInfoDef") + proto.RegisterType((*CollectionDef)(nil), "tensorflow.CollectionDef") + proto.RegisterType((*CollectionDef_NodeList)(nil), "tensorflow.CollectionDef.NodeList") + proto.RegisterType((*CollectionDef_BytesList)(nil), "tensorflow.CollectionDef.BytesList") + proto.RegisterType((*CollectionDef_Int64List)(nil), "tensorflow.CollectionDef.Int64List") + proto.RegisterType((*CollectionDef_FloatList)(nil), "tensorflow.CollectionDef.FloatList") + proto.RegisterType((*CollectionDef_AnyList)(nil), "tensorflow.CollectionDef.AnyList") + proto.RegisterType((*TensorInfo)(nil), "tensorflow.TensorInfo") + proto.RegisterType((*TensorInfo_CooSparse)(nil), "tensorflow.TensorInfo.CooSparse") + proto.RegisterType((*TensorInfo_CompositeTensor)(nil), "tensorflow.TensorInfo.CompositeTensor") + proto.RegisterType((*SignatureDef)(nil), "tensorflow.SignatureDef") + proto.RegisterMapType((map[string]*TensorInfo)(nil), "tensorflow.SignatureDef.InputsEntry") + proto.RegisterMapType((map[string]*TensorInfo)(nil), "tensorflow.SignatureDef.OutputsEntry") + proto.RegisterType((*AssetFileDef)(nil), "tensorflow.AssetFileDef") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/meta_graph.proto", fileDescriptor_e94adf32e895c059) +} + +var fileDescriptor_e94adf32e895c059 = []byte{ + // 1152 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x8e, 0x7e, 0x6c, 0x4b, 0x23, 0xd9, 0x56, 0xb6, 0x46, 0xea, 0x10, 0x01, 0xea, 0xa8, 0x76, + 0x90, 0xa6, 0x2e, 0x05, 0xa7, 0x89, 0x5b, 0x14, 0x46, 0x02, 0x29, 0xaa, 0x63, 0x03, 0x6d, 0x9c, + 0x52, 0x41, 0x81, 0xb6, 0x07, 0x82, 0x22, 0x97, 0x34, 0x6b, 0x69, 0x97, 0xe0, 0xae, 0x1c, 0xe8, + 0xd8, 0x37, 0xe9, 0x53, 0xf4, 0x4d, 0x7a, 0x68, 0x9f, 0xa4, 0xb7, 0x16, 0xfb, 0x43, 0x72, 0x69, + 0x9b, 0xf1, 0xa5, 0x37, 0xce, 0xce, 0x37, 0xdf, 0xce, 0x7c, 0x33, 0xbb, 0x5c, 0xf8, 0x8c, 0x63, + 0xc2, 0x68, 0x1a, 0xce, 0xe8, 0xfb, 0x81, 
0x4f, 0x53, 0x3c, 0x48, 0x52, 0xca, 0xe9, 0x74, 0x11, + 0x0e, 0xe6, 0x98, 0x7b, 0x6e, 0x94, 0x7a, 0xc9, 0xb9, 0x2d, 0xd7, 0x10, 0x14, 0x50, 0xeb, 0x7e, + 0x44, 0x69, 0x34, 0x33, 0xd0, 0x1e, 0x59, 0x2a, 0x98, 0xb5, 0x77, 0x95, 0x31, 0x4c, 0xbd, 0x39, + 0x7e, 0x4f, 0xd3, 0x8b, 0x81, 0xc1, 0x66, 0x3d, 0xaa, 0x86, 0xd1, 0xc4, 0x0d, 0x70, 0xa8, 0x71, + 0xfb, 0xd5, 0x38, 0xe5, 0x71, 0xd9, 0xb9, 0x97, 0xe0, 0xdb, 0x37, 0xe7, 0xcb, 0x04, 0x33, 0x0d, + 0x3b, 0xa8, 0xac, 0x9a, 0x79, 0x97, 0x38, 0x70, 0xe9, 0xf4, 0x57, 0xec, 0x73, 0xb3, 0x7a, 0x6b, + 0xf7, 0x83, 0x21, 0x69, 0xd5, 0xfe, 0x05, 0x8a, 0xa7, 0x0b, 0x9f, 0x2b, 0x58, 0xff, 0xdf, 0x35, + 0xe8, 0x7e, 0x8f, 0xb9, 0xf7, 0x5a, 0x6c, 0x30, 0xc6, 0x21, 0x3a, 0x81, 0x75, 0xa9, 0x77, 0x4c, + 0x42, 0x2a, 0x8a, 0xdf, 0xae, 0xed, 0xd4, 0x1e, 0x77, 0x9e, 0xee, 0xda, 0x05, 0x9f, 0x6d, 0x06, + 0x48, 0xe3, 0x94, 0x84, 0x74, 0x8c, 0x43, 0xa7, 0x33, 0x2f, 0x0c, 0x74, 0x00, 0x6d, 0x99, 0xb6, + 0x64, 0xa9, 0x4b, 0x96, 0x2d, 0x93, 0x25, 0x63, 0x70, 0x5a, 0x51, 0xb6, 0xf9, 0x01, 0xb4, 0x65, + 0x0d, 0x32, 0xa4, 0x71, 0x3d, 0x64, 0x22, 0x9c, 0x32, 0x84, 0xe9, 0x2f, 0xe4, 0xc0, 0x86, 0x4f, + 0x67, 0x33, 0xec, 0xf3, 0x98, 0x12, 0x19, 0xd7, 0xdc, 0x69, 0x3c, 0xee, 0x3c, 0xfd, 0xbc, 0x32, + 0xe1, 0x57, 0x39, 0x7c, 0x8c, 0xc3, 0x6f, 0x09, 0x4f, 0x97, 0xce, 0xba, 0x6f, 0xae, 0xa1, 0x33, + 0x58, 0x67, 0x71, 0x44, 0x3c, 0xbe, 0x48, 0xb1, 0xa4, 0x5c, 0x91, 0x94, 0x4f, 0x2a, 0x29, 0x27, + 0x19, 0x3a, 0x67, 0xec, 0x32, 0x63, 0x09, 0xbd, 0x80, 0x0d, 0x8f, 0x31, 0xcc, 0xdd, 0x30, 0x9e, + 0x29, 0xc6, 0x55, 0xc9, 0xb8, 0x6d, 0x32, 0x0e, 0x05, 0xe2, 0x38, 0x9e, 0x89, 0x08, 0xa7, 0xeb, + 0x19, 0x16, 0x3a, 0x86, 0x9e, 0x39, 0x08, 0x92, 0x61, 0x4d, 0xca, 0xf3, 0xe0, 0xaa, 0x3c, 0xc1, + 0x99, 0x04, 0xca, 0xd4, 0x9c, 0x0d, 0x5a, 0x18, 0x63, 0x1c, 0x5a, 0x7f, 0xd5, 0xa1, 0x63, 0xf4, + 0x0b, 0xed, 0x03, 0x2a, 0x0e, 0x97, 0x7b, 0x89, 0x53, 0x16, 0x53, 0x22, 0x3b, 0xde, 0x76, 0x7a, + 0xf3, 0xac, 0xc2, 0x1f, 0xd5, 0x3a, 0x3a, 0x82, 0x1e, 0xe3, 0x69, 0x9c, 0x24, 
0x62, 0x2e, 0x13, + 0x77, 0x16, 0x33, 0xae, 0xfb, 0x8a, 0xcc, 0x2c, 0xce, 0x92, 0xef, 0x62, 0xc6, 0x9d, 0x8d, 0x0c, + 0xab, 0x6c, 0x34, 0x80, 0x96, 0x47, 0x96, 0x72, 0xae, 0xf2, 0xd6, 0xaa, 0xb3, 0x6b, 0x67, 0xa3, + 0x69, 0x0f, 0xc9, 0xd2, 0x59, 0xf3, 0xc8, 0x52, 0xe4, 0x87, 0x10, 0x34, 0xb9, 0x17, 0x31, 0xd9, + 0xcf, 0xb6, 0x23, 0xbf, 0xd1, 0x17, 0x80, 0x8a, 0x9d, 0xf2, 0x84, 0x57, 0x64, 0xc2, 0x77, 0x0b, + 0x4f, 0x96, 0xf1, 0x33, 0xb8, 0x67, 0xc0, 0xa3, 0x98, 0xe7, 0x21, 0xab, 0x32, 0x64, 0xab, 0xf0, + 0xbe, 0x8e, 0xb9, 0x11, 0x95, 0xd7, 0x19, 0xe0, 0xd0, 0x5b, 0xcc, 0xb8, 0xeb, 0x71, 0x9e, 0x32, + 0xa9, 0x79, 0xcb, 0xd9, 0xca, 0xbc, 0x63, 0xe5, 0x1c, 0x0a, 0x9f, 0xf5, 0x0b, 0xa0, 0xeb, 0x93, + 0x85, 0x7a, 0xd0, 0xb8, 0xc0, 0x4b, 0x2d, 0xa9, 0xf8, 0x44, 0x03, 0x58, 0xb9, 0xf4, 0x66, 0x0b, + 0xac, 0xa5, 0xbb, 0x6f, 0x4a, 0x57, 0x22, 0x70, 0x14, 0xee, 0x9b, 0xfa, 0xd7, 0x35, 0xeb, 0x27, + 0xb8, 0x7b, 0x6d, 0xc6, 0x6e, 0xe0, 0xb6, 0xcb, 0xdc, 0xa5, 0xf1, 0x32, 0xe3, 0x0d, 0xea, 0xfe, + 0x1f, 0x4d, 0x58, 0x2f, 0xed, 0x8b, 0x86, 0xd0, 0x26, 0x34, 0xc0, 0xaa, 0xc1, 0xea, 0xf8, 0xf7, + 0x2b, 0xb3, 0xb4, 0xdf, 0xd0, 0x00, 0x8b, 0x06, 0x9f, 0xdc, 0x71, 0x5a, 0x44, 0x7f, 0xa3, 0x31, + 0xc0, 0x74, 0xc9, 0x31, 0x33, 0x87, 0xe4, 0xd3, 0x6a, 0x8e, 0x91, 0xc0, 0x6a, 0x92, 0xf6, 0x34, + 0x33, 0x04, 0x4b, 0x4c, 0xf8, 0xe1, 0x33, 0xc5, 0xd2, 0xb8, 0x8d, 0xe5, 0x54, 0x60, 0x33, 0x96, + 0x38, 0x33, 0x04, 0x4b, 0x38, 0xa3, 0x1e, 0x57, 0x2c, 0xcd, 0xdb, 0x58, 0x8e, 0x05, 0x36, 0x63, + 0x09, 0x33, 0x03, 0xbd, 0x50, 0xe3, 0x2b, 0x39, 0x56, 0x24, 0xc7, 0xc3, 0x6a, 0x8e, 0x21, 0x59, + 0x6a, 0x06, 0x31, 0xcd, 0xe2, 0xd3, 0xda, 0x81, 0x56, 0xa6, 0x14, 0xda, 0xca, 0xda, 0x54, 0x93, + 0xa3, 0xad, 0x0c, 0xeb, 0x21, 0xb4, 0x73, 0x1d, 0xca, 0x90, 0x6e, 0x06, 0xd9, 0x83, 0x76, 0x5e, + 0x24, 0xda, 0x36, 0x21, 0x8d, 0x51, 0xbd, 0x57, 0x33, 0x60, 0x79, 0x15, 0x65, 0x58, 0xdd, 0x84, + 0x3d, 0x87, 0x35, 0x9d, 0x28, 0x7a, 0x62, 0x82, 0xaa, 0x4e, 0xa6, 0x82, 0x8c, 0x56, 0xa1, 0x79, + 0x11, 0x93, 0xa0, 
0xff, 0x77, 0x13, 0xe0, 0x9d, 0x54, 0x40, 0x1e, 0xd7, 0x2d, 0x68, 0x12, 0x6f, + 0x8e, 0xd5, 0x38, 0x9e, 0xdc, 0x71, 0xa4, 0x85, 0x86, 0x00, 0x3e, 0xa5, 0x2e, 0x4b, 0xbc, 0x94, + 0x61, 0x2d, 0xfe, 0x8e, 0x29, 0x5c, 0xc1, 0x60, 0xbf, 0xa2, 0x74, 0x22, 0x71, 0x42, 0x79, 0x3f, + 0x33, 0xd0, 0x04, 0x7a, 0x3e, 0x9d, 0x27, 0x94, 0xc5, 0x1c, 0xbb, 0x2a, 0x52, 0x77, 0xe0, 0x51, + 0x25, 0x91, 0x86, 0xab, 0xb5, 0x93, 0x3b, 0xce, 0xa6, 0x5f, 0x5e, 0x12, 0x05, 0x07, 0xe2, 0x3f, + 0x2c, 0x67, 0x73, 0xa3, 0xfc, 0x97, 0x19, 0x7b, 0xdc, 0x7b, 0xb7, 0x4c, 0xb0, 0xa3, 0x20, 0xe8, + 0x25, 0x74, 0xcd, 0x1f, 0xbc, 0x1e, 0xc4, 0x07, 0xd7, 0x37, 0x9f, 0x08, 0xf7, 0x5b, 0x21, 0x99, + 0xd3, 0xe1, 0xc5, 0x8a, 0xf5, 0x7b, 0x0d, 0xda, 0x79, 0x71, 0xe2, 0xd2, 0x95, 0x42, 0x32, 0x5d, + 0x8c, 0x5b, 0xc8, 0xe6, 0xf4, 0x94, 0x47, 0xd1, 0xbd, 0x11, 0x02, 0xda, 0xf0, 0x51, 0x4c, 0x82, + 0xd8, 0xbf, 0x02, 0xaf, 0xab, 0x2b, 0x4f, 0xbb, 0x0c, 0xfc, 0x73, 0xf8, 0x38, 0xc0, 0x84, 0x61, + 0x95, 0x6b, 0x29, 0xa6, 0xa1, 0xee, 0x3c, 0xe9, 0x96, 0x89, 0x15, 0x61, 0xd6, 0x6f, 0x35, 0xd8, + 0xbc, 0x22, 0x1b, 0x3a, 0x84, 0xb6, 0xa8, 0xdf, 0x65, 0x09, 0xf6, 0xf5, 0x3d, 0x50, 0xba, 0xad, + 0x84, 0x46, 0x93, 0x04, 0xfb, 0xaa, 0xe2, 0x16, 0xd7, 0x26, 0x3a, 0x14, 0x3d, 0x9f, 0x27, 0x94, + 0x60, 0xc2, 0xd9, 0x76, 0x5d, 0x4e, 0xd4, 0xbd, 0x9b, 0x5b, 0xe5, 0x18, 0xc8, 0x11, 0x40, 0x0b, + 0x13, 0x9f, 0x06, 0x31, 0x89, 0xfa, 0x7f, 0xd6, 0xa1, 0x6b, 0xde, 0x58, 0xe8, 0x08, 0x56, 0x63, + 0x92, 0x2c, 0x38, 0xd3, 0x23, 0xba, 0x5b, 0x75, 0xb7, 0xd9, 0xa7, 0x12, 0xa6, 0x7e, 0xc3, 0x3a, + 0x06, 0xbd, 0x84, 0x35, 0xba, 0xe0, 0x32, 0x5c, 0xe5, 0xb3, 0x57, 0x19, 0x7e, 0xa6, 0x70, 0x2a, + 0x3e, 0x8b, 0x42, 0x9f, 0x80, 0x78, 0xdb, 0x9c, 0xd3, 0xc0, 0x94, 0x12, 0xd4, 0x92, 0x14, 0xf0, + 0x07, 0xe8, 0x18, 0x1b, 0xdf, 0x70, 0x37, 0xef, 0x97, 0xef, 0xe6, 0x2a, 0x41, 0x8c, 0x4b, 0xdf, + 0x81, 0xae, 0x99, 0xcc, 0xff, 0xc1, 0xd9, 0xf7, 0xa1, 0x6b, 0xbe, 0x33, 0xd0, 0x57, 0xa0, 0x27, + 0x55, 0xfd, 0x98, 0x6b, 0x1f, 0xe4, 0xd1, 0xef, 0x6e, 
0x79, 0xdc, 0x2d, 0x68, 0x89, 0xc7, 0x8c, + 0x31, 0x8c, 0xb9, 0x3d, 0x22, 0xb0, 0x4d, 0xd3, 0xc8, 0x24, 0xc9, 0x1f, 0xbf, 0xa3, 0xcd, 0xfc, + 0xe1, 0x24, 0xc7, 0x86, 0xbd, 0xad, 0xfd, 0x7c, 0x14, 0xc5, 0xfc, 0x7c, 0x31, 0xb5, 0x7d, 0x3a, + 0x1f, 0x18, 0xaf, 0xd6, 0x9b, 0x3f, 0x23, 0x5a, 0x7e, 0xce, 0xfe, 0x53, 0xab, 0x4d, 0x57, 0xa5, + 0xf1, 0xe5, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x3d, 0x7b, 0x72, 0x43, 0x0c, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/meta_graph.proto b/executor/proto/tensorflow/core/protobuf/meta_graph.proto new file mode 100644 index 0000000000..1eb2023f01 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/meta_graph.proto @@ -0,0 +1,338 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "MetaGraphProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +import "google/protobuf/any.proto"; + +import "tensorflow/core/framework/graph.proto"; +import "tensorflow/core/framework/op_def.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; +import "tensorflow/core/protobuf/saved_object_graph.proto"; +import "tensorflow/core/protobuf/saver.proto"; +import "tensorflow/core/protobuf/struct.proto"; + +// NOTE: This protocol buffer is evolving, and will go through revisions in the +// coming months. +// +// Protocol buffer containing the following which are necessary to restart +// training, run inference. It can be used to serialize/de-serialize memory +// objects necessary for running computation in a graph when crossing the +// process boundary. It can be used for long term storage of graphs, +// cross-language execution of graphs, etc. 
+// MetaInfoDef +// GraphDef +// SaverDef +// CollectionDef +// TensorInfo +// SignatureDef +message MetaGraphDef { + // Meta information regarding the graph to be exported. To be used by users + // of this protocol buffer to encode information regarding their meta graph. + message MetaInfoDef { + // User specified Version string. Can be the name of the model and revision, + // steps this model has been trained to, etc. + string meta_graph_version = 1; + + // A copy of the OpDefs used by the producer of this graph_def. + // Descriptions and Ops not used in graph_def are stripped out. + OpList stripped_op_list = 2; + + // A serialized protobuf. Can be the time this meta graph is created, or + // modified, or name of the model. + google.protobuf.Any any_info = 3; + + // User supplied tag(s) on the meta_graph and included graph_def. + // + // MetaGraphDefs should be tagged with their capabilities or use-cases. + // Examples: "train", "serve", "gpu", "tpu", etc. + // These tags enable loaders to access the MetaGraph(s) appropriate for a + // specific use-case or runtime environment. + repeated string tags = 4; + + // The __version__ string of the tensorflow build used to write this graph. + // This will be populated by the framework, which will overwrite any user + // supplied value. + string tensorflow_version = 5; + + // The __git_version__ string of the tensorflow build used to write this + // graph. This will be populated by the framework, which will overwrite any + // user supplied value. + string tensorflow_git_version = 6; + + // A flag to denote whether default-valued attrs have been stripped from + // the nodes in this graph_def. + bool stripped_default_attrs = 7; + } + MetaInfoDef meta_info_def = 1; + + // GraphDef. + GraphDef graph_def = 2; + + // SaverDef. + SaverDef saver_def = 3; + + // collection_def: Map from collection name to collections. + // See CollectionDef section for details. 
+ map collection_def = 4; + + // signature_def: Map from user supplied key for a signature to a single + // SignatureDef. + map signature_def = 5; + + // Asset file def to be used with the defined graph. + repeated AssetFileDef asset_file_def = 6; + + // Extra information about the structure of functions and stateful objects. + SavedObjectGraph object_graph_def = 7; +} + +// CollectionDef should cover most collections. +// To add a user-defined collection, do one of the following: +// 1. For simple data types, such as string, int, float: +// tf.add_to_collection("your_collection_name", your_simple_value) +// strings will be stored as bytes_list. +// +// 2. For Protobuf types, there are three ways to add them: +// 1) tf.add_to_collection("your_collection_name", +// your_proto.SerializeToString()) +// +// collection_def { +// key: "user_defined_bytes_collection" +// value { +// bytes_list { +// value: "queue_name: \"test_queue\"\n" +// } +// } +// } +// +// or +// +// 2) tf.add_to_collection("your_collection_name", str(your_proto)) +// +// collection_def { +// key: "user_defined_string_collection" +// value { +// bytes_list { +// value: "\n\ntest_queue" +// } +// } +// } +// +// or +// +// 3) any_buf = any_pb2.Any() +// tf.add_to_collection("your_collection_name", +// any_buf.Pack(your_proto)) +// +// collection_def { +// key: "user_defined_any_collection" +// value { +// any_list { +// value { +// type_url: "type.googleapis.com/tensorflow.QueueRunnerDef" +// value: "\n\ntest_queue" +// } +// } +// } +// } +// +// 3. For Python objects, implement to_proto() and from_proto(), and register +// them in the following manner: +// ops.register_proto_function("your_collection_name", +// proto_type, +// to_proto=YourPythonObject.to_proto, +// from_proto=YourPythonObject.from_proto) +// These functions will be invoked to serialize and de-serialize the +// collection. 
For example, +// ops.register_proto_function(ops.GraphKeys.GLOBAL_VARIABLES, +// proto_type=variable_pb2.VariableDef, +// to_proto=Variable.to_proto, +// from_proto=Variable.from_proto) +message CollectionDef { + // NodeList is used for collecting nodes in graph. For example + // collection_def { + // key: "summaries" + // value { + // node_list { + // value: "input_producer/ScalarSummary:0" + // value: "shuffle_batch/ScalarSummary:0" + // value: "ImageSummary:0" + // } + // } + message NodeList { + repeated string value = 1; + } + + // BytesList is used for collecting strings and serialized protobufs. For + // example: + // collection_def { + // key: "trainable_variables" + // value { + // bytes_list { + // value: "\n\017conv1/weights:0\022\024conv1/weights/Assign + // \032\024conv1/weights/read:0" + // value: "\n\016conv1/biases:0\022\023conv1/biases/Assign\032 + // \023conv1/biases/read:0" + // } + // } + // } + message BytesList { + repeated bytes value = 1; + } + + // Int64List is used for collecting int, int64 and long values. + message Int64List { + repeated int64 value = 1 [packed = true]; + } + + // FloatList is used for collecting float values. + message FloatList { + repeated float value = 1 [packed = true]; + } + + // AnyList is used for collecting Any protos. + message AnyList { + repeated google.protobuf.Any value = 1; + } + + oneof kind { + NodeList node_list = 1; + BytesList bytes_list = 2; + Int64List int64_list = 3; + FloatList float_list = 4; + AnyList any_list = 5; + } +} + +// Information about a Tensor necessary for feeding or retrieval. +message TensorInfo { + // For sparse tensors, The COO encoding stores a triple of values, indices, + // and shape. + message CooSparse { + // The shape of the values Tensor is [?]. Its dtype must be the dtype of + // the SparseTensor as a whole, given in the enclosing TensorInfo. + string values_tensor_name = 1; + + // The indices Tensor must have dtype int64 and shape [?, ?]. 
+ string indices_tensor_name = 2; + + // The dynamic logical shape represented by the SparseTensor is recorded in + // the Tensor referenced here. It must have dtype int64 and shape [?]. + string dense_shape_tensor_name = 3; + } + + // Generic encoding for composite tensors. + message CompositeTensor { + // The serialized TypeSpec for the composite tensor. + TypeSpecProto type_spec = 1; + + // A TensorInfo for each flattened component tensor. + repeated TensorInfo components = 2; + } + + oneof encoding { + // For dense `Tensor`s, the name of the tensor in the graph. + string name = 1; + // There are many possible encodings of sparse matrices + // (https://en.wikipedia.org/wiki/Sparse_matrix). Currently, TensorFlow + // uses only the COO encoding. This is supported and documented in the + // SparseTensor Python class. + CooSparse coo_sparse = 4; + // Generic encoding for CompositeTensors. + CompositeTensor composite_tensor = 5; + } + DataType dtype = 2; + // The static shape should be recorded here, to the extent that it can + // be known in advance. In the case of a SparseTensor, this field describes + // the logical shape of the represented tensor (aka dense_shape). + TensorShapeProto tensor_shape = 3; +} + +// SignatureDef defines the signature of a computation supported by a TensorFlow +// graph. +// +// For example, a model with two loss computations, sharing a single input, +// might have the following signature_def map. +// +// Note that across the two SignatureDefs "loss_A" and "loss_B", the input key, +// output key, and method_name are identical, and will be used by system(s) that +// implement or rely upon this particular loss method. The output tensor names +// differ, demonstrating how different outputs can exist for the same method. +// +// signature_def { +// key: "loss_A" +// value { +// inputs { +// key: "input" +// value { +// name: "input:0" +// dtype: DT_STRING +// tensor_shape: ... 
+// } +// } +// outputs { +// key: "loss_output" +// value { +// name: "loss_output_A:0" +// dtype: DT_FLOAT +// tensor_shape: ... +// } +// } +// } +// ... +// method_name: "some/package/compute_loss" +// } +// signature_def { +// key: "loss_B" +// value { +// inputs { +// key: "input" +// value { +// name: "input:0" +// dtype: DT_STRING +// tensor_shape: ... +// } +// } +// outputs { +// key: "loss_output" +// value { +// name: "loss_output_B:0" +// dtype: DT_FLOAT +// tensor_shape: ... +// } +// } +// } +// ... +// method_name: "some/package/compute_loss" +// } +message SignatureDef { + // Named input parameters. + map inputs = 1; + // Named output parameters. + map outputs = 2; + // Extensible method_name information enabling third-party users to mark a + // SignatureDef as supporting a particular method. This enables producers and + // consumers of SignatureDefs, e.g. a model definition library and a serving + // library to have a clear hand-off regarding the semantics of a computation. + // + // Note that multiple SignatureDefs in a single MetaGraphDef may have the same + // method_name. This is commonly used to support multi-headed computation, + // where a single graph computation may return multiple results. + string method_name = 3; +} + +// An asset file def for a single file or a set of sharded files with the same +// name. +message AssetFileDef { + // The tensor to bind the asset filename to. + TensorInfo tensor_info = 1; + // The filename within an assets directory. Note: does not include the path + // prefix, i.e. directories. For an asset at /tmp/path/vocab.txt, the filename + // would be "vocab.txt". + string filename = 2; +} diff --git a/executor/proto/tensorflow/core/protobuf/named_tensor.pb.go b/executor/proto/tensorflow/core/protobuf/named_tensor.pb.go new file mode 100644 index 0000000000..aaceeb25e3 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/named_tensor.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// source: tensorflow/core/protobuf/named_tensor.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A pair of tensor name and tensor values. +type NamedTensorProto struct { + // Name of the tensor. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The client can populate a TensorProto using a tensorflow::Tensor`, or + // directly using the protobuf field accessors. + // + // The client specifies whether the returned tensor values should be + // filled tensor fields (float_val, int_val, etc.) or encoded in a + // compact form in tensor.tensor_content. 
+ Tensor *framework.TensorProto `protobuf:"bytes,2,opt,name=tensor,proto3" json:"tensor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedTensorProto) Reset() { *m = NamedTensorProto{} } +func (m *NamedTensorProto) String() string { return proto.CompactTextString(m) } +func (*NamedTensorProto) ProtoMessage() {} +func (*NamedTensorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_5c12ce8841c69dcd, []int{0} +} + +func (m *NamedTensorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedTensorProto.Unmarshal(m, b) +} +func (m *NamedTensorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedTensorProto.Marshal(b, m, deterministic) +} +func (m *NamedTensorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedTensorProto.Merge(m, src) +} +func (m *NamedTensorProto) XXX_Size() int { + return xxx_messageInfo_NamedTensorProto.Size(m) +} +func (m *NamedTensorProto) XXX_DiscardUnknown() { + xxx_messageInfo_NamedTensorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedTensorProto proto.InternalMessageInfo + +func (m *NamedTensorProto) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedTensorProto) GetTensor() *framework.TensorProto { + if m != nil { + return m.Tensor + } + return nil +} + +func init() { + proto.RegisterType((*NamedTensorProto)(nil), "tensorflow.NamedTensorProto") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/named_tensor.proto", fileDescriptor_5c12ce8841c69dcd) +} + +var fileDescriptor_5c12ce8841c69dcd = []byte{ + // 181 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x2e, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0xd7, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x2f, 0x28, 0xca, 0x2f, + 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0xcf, 0x4b, 0xcc, 0x4d, 0x4d, 0x89, 0x87, 0x48, 
0xeb, 0x81, 0x45, + 0x85, 0xb8, 0x10, 0x8a, 0xa5, 0xd4, 0xd0, 0x35, 0xa6, 0x15, 0x25, 0xe6, 0xa6, 0x96, 0xe7, 0x17, + 0x65, 0xeb, 0x23, 0xeb, 0x51, 0x0a, 0xe7, 0x12, 0xf0, 0x03, 0x99, 0x14, 0x02, 0x16, 0x0c, 0x00, + 0x9b, 0x23, 0xc4, 0xc5, 0x02, 0x32, 0x5d, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xcc, 0x16, + 0xd2, 0xe7, 0x62, 0x83, 0xe8, 0x93, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x36, 0x12, 0xd7, 0x43, 0x58, + 0xa0, 0x87, 0xa4, 0x39, 0x08, 0xaa, 0xcc, 0xa9, 0x80, 0x4b, 0x22, 0xbf, 0x28, 0x1d, 0x59, 0x15, + 0xdc, 0x05, 0x4e, 0x82, 0xe8, 0x56, 0x16, 0x07, 0x30, 0x46, 0xd9, 0xa4, 0x67, 0x96, 0x64, 0x94, + 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x23, 0x39, 0x1e, 0x3b, 0x33, 0x3d, 0x1f, 0x35, 0x38, 0x7e, + 0x30, 0x32, 0x26, 0xb1, 0x81, 0x39, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0x75, 0xd5, + 0x40, 0x34, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/named_tensor.proto b/executor/proto/tensorflow/core/protobuf/named_tensor.proto new file mode 100644 index 0000000000..6e2f7feee2 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/named_tensor.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "NamedTensorProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +import "tensorflow/core/framework/tensor.proto"; + +// A pair of tensor name and tensor values. +message NamedTensorProto { + // Name of the tensor. + string name = 1; + + // The client can populate a TensorProto using a tensorflow::Tensor`, or + // directly using the protobuf field accessors. + // + // The client specifies whether the returned tensor values should be + // filled tensor fields (float_val, int_val, etc.) or encoded in a + // compact form in tensor.tensor_content. 
+ TensorProto tensor = 2; +} diff --git a/executor/proto/tensorflow/core/protobuf/queue_runner.pb.go b/executor/proto/tensorflow/core/protobuf/queue_runner.pb.go new file mode 100644 index 0000000000..36b0e2417a --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/queue_runner.pb.go @@ -0,0 +1,131 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/queue_runner.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + core "github.com/tensorflow/tensorflow/tensorflow/go/core/lib/core" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Protocol buffer representing a QueueRunner. +type QueueRunnerDef struct { + // Queue name. + QueueName string `protobuf:"bytes,1,opt,name=queue_name,json=queueName,proto3" json:"queue_name,omitempty"` + // A list of enqueue operations. + EnqueueOpName []string `protobuf:"bytes,2,rep,name=enqueue_op_name,json=enqueueOpName,proto3" json:"enqueue_op_name,omitempty"` + // The operation to run to close the queue. + CloseOpName string `protobuf:"bytes,3,opt,name=close_op_name,json=closeOpName,proto3" json:"close_op_name,omitempty"` + // The operation to run to cancel the queue. + CancelOpName string `protobuf:"bytes,4,opt,name=cancel_op_name,json=cancelOpName,proto3" json:"cancel_op_name,omitempty"` + // A list of exception types considered to signal a safely closed queue + // if raised during enqueue operations. 
+ QueueClosedExceptionTypes []core.Code `protobuf:"varint,5,rep,packed,name=queue_closed_exception_types,json=queueClosedExceptionTypes,proto3,enum=tensorflow.error.Code" json:"queue_closed_exception_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueRunnerDef) Reset() { *m = QueueRunnerDef{} } +func (m *QueueRunnerDef) String() string { return proto.CompactTextString(m) } +func (*QueueRunnerDef) ProtoMessage() {} +func (*QueueRunnerDef) Descriptor() ([]byte, []int) { + return fileDescriptor_7af35200d68d14ae, []int{0} +} + +func (m *QueueRunnerDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueRunnerDef.Unmarshal(m, b) +} +func (m *QueueRunnerDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueRunnerDef.Marshal(b, m, deterministic) +} +func (m *QueueRunnerDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueRunnerDef.Merge(m, src) +} +func (m *QueueRunnerDef) XXX_Size() int { + return xxx_messageInfo_QueueRunnerDef.Size(m) +} +func (m *QueueRunnerDef) XXX_DiscardUnknown() { + xxx_messageInfo_QueueRunnerDef.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueRunnerDef proto.InternalMessageInfo + +func (m *QueueRunnerDef) GetQueueName() string { + if m != nil { + return m.QueueName + } + return "" +} + +func (m *QueueRunnerDef) GetEnqueueOpName() []string { + if m != nil { + return m.EnqueueOpName + } + return nil +} + +func (m *QueueRunnerDef) GetCloseOpName() string { + if m != nil { + return m.CloseOpName + } + return "" +} + +func (m *QueueRunnerDef) GetCancelOpName() string { + if m != nil { + return m.CancelOpName + } + return "" +} + +func (m *QueueRunnerDef) GetQueueClosedExceptionTypes() []core.Code { + if m != nil { + return m.QueueClosedExceptionTypes + } + return nil +} + +func init() { + proto.RegisterType((*QueueRunnerDef)(nil), "tensorflow.QueueRunnerDef") +} + +func init() { + 
proto.RegisterFile("tensorflow/core/protobuf/queue_runner.proto", fileDescriptor_7af35200d68d14ae) +} + +var fileDescriptor_7af35200d68d14ae = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xd1, 0x4b, 0xbc, 0x40, + 0x10, 0xc7, 0xd9, 0xdf, 0xfd, 0x0a, 0x6e, 0xeb, 0x8c, 0x7c, 0x08, 0x8b, 0x02, 0x39, 0x22, 0xa4, + 0x40, 0xa1, 0x5e, 0x7b, 0xba, 0xab, 0xd7, 0xba, 0x24, 0x08, 0x7a, 0x11, 0x5d, 0x47, 0x93, 0x74, + 0x67, 0x5b, 0x95, 0xab, 0xff, 0x3c, 0x7a, 0x0a, 0x67, 0x2d, 0x2d, 0x7a, 0x9b, 0xfd, 0xf2, 0xf9, + 0xcc, 0x30, 0xb3, 0xfc, 0xac, 0x01, 0x59, 0xa3, 0xce, 0x4a, 0x5c, 0x07, 0x02, 0x35, 0x04, 0x4a, + 0x63, 0x83, 0x49, 0x9b, 0x05, 0x2f, 0x2d, 0xb4, 0x10, 0xe9, 0x56, 0x4a, 0xd0, 0x3e, 0xa5, 0x36, + 0x1f, 0xe0, 0x83, 0xd3, 0xdf, 0x62, 0x59, 0x24, 0xa6, 0x00, 0xad, 0x51, 0x47, 0x02, 0x53, 0xa8, + 0x8d, 0x37, 0xff, 0x60, 0xdc, 0xba, 0xeb, 0xda, 0x85, 0xd4, 0xed, 0x0a, 0x32, 0xfb, 0x88, 0x73, + 0x33, 0x40, 0xc6, 0x15, 0x38, 0xcc, 0x65, 0xde, 0x34, 0x9c, 0x52, 0x72, 0x13, 0x57, 0x60, 0x9f, + 0xf0, 0x1d, 0x90, 0x06, 0x40, 0x65, 0x98, 0x7f, 0xee, 0xc4, 0x9b, 0x86, 0xb3, 0x3e, 0xbe, 0x55, + 0xc4, 0xcd, 0xf9, 0x4c, 0x94, 0x58, 0x0f, 0xd4, 0x84, 0x3a, 0x6d, 0x51, 0xd8, 0x33, 0xc7, 0xdc, + 0x12, 0xb1, 0x14, 0x50, 0x7e, 0x43, 0xff, 0x09, 0xda, 0x36, 0x69, 0x4f, 0x3d, 0xf0, 0x43, 0x33, + 0x8f, 0xd4, 0x34, 0x82, 0x57, 0x01, 0xaa, 0x29, 0x50, 0x46, 0xcd, 0x9b, 0x82, 0xda, 0xd9, 0x70, + 0x27, 0x9e, 0x75, 0xbe, 0xe7, 0x0f, 0x6b, 0xfb, 0xb4, 0xa8, 0xbf, 0xc4, 0x14, 0xc2, 0x7d, 0x72, + 0x97, 0xa4, 0x5e, 0x7f, 0x99, 0xf7, 0x9d, 0xb8, 0x50, 0xdc, 0x41, 0x9d, 0x8f, 0xbd, 0x4c, 0xc7, + 0x15, 0xac, 0x51, 0x3f, 0x2f, 0x76, 0x47, 0x57, 0x59, 0x75, 0xa7, 0xaa, 0x57, 0xec, 0xf1, 0x32, + 0x2f, 0x9a, 0xa7, 0x36, 0xf1, 0x05, 0x56, 0xc1, 0xe8, 0xc8, 0x7f, 0x97, 0x39, 0xfe, 0xfc, 0xb6, + 0x77, 0xc6, 0x92, 0x4d, 0x7a, 0x5c, 0x7c, 0x06, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xca, 0xd0, 0x9d, + 0xdc, 0x01, 0x00, 0x00, +} diff 
--git a/executor/proto/tensorflow/core/protobuf/queue_runner.proto b/executor/proto/tensorflow/core/protobuf/queue_runner.proto new file mode 100644 index 0000000000..f4df649f7d --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/queue_runner.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "QueueRunnerProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +import "tensorflow/core/lib/core/error_codes.proto"; + +// Protocol buffer representing a QueueRunner. +message QueueRunnerDef { + // Queue name. + string queue_name = 1; + + // A list of enqueue operations. + repeated string enqueue_op_name = 2; + + // The operation to run to close the queue. + string close_op_name = 3; + + // The operation to run to cancel the queue. + string cancel_op_name = 4; + + // A list of exception types considered to signal a safely closed queue + // if raised during enqueue operations. + repeated error.Code queue_closed_exception_types = 5; +} diff --git a/executor/proto/tensorflow/core/protobuf/replay_log.pb.go b/executor/proto/tensorflow/core/protobuf/replay_log.pb.go new file mode 100644 index 0000000000..cbd8aca754 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/replay_log.pb.go @@ -0,0 +1,520 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/replay_log.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Records the creation of a new replay session. We record the device listing +// here to capture the state of the cluster. +type NewReplaySession struct { + Devices *ListDevicesResponse `protobuf:"bytes,1,opt,name=devices,proto3" json:"devices,omitempty"` + SessionHandle string `protobuf:"bytes,2,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NewReplaySession) Reset() { *m = NewReplaySession{} } +func (m *NewReplaySession) String() string { return proto.CompactTextString(m) } +func (*NewReplaySession) ProtoMessage() {} +func (*NewReplaySession) Descriptor() ([]byte, []int) { + return fileDescriptor_9ee59cdea95caad8, []int{0} +} + +func (m *NewReplaySession) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NewReplaySession.Unmarshal(m, b) +} +func (m *NewReplaySession) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NewReplaySession.Marshal(b, m, deterministic) +} +func (m *NewReplaySession) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewReplaySession.Merge(m, src) +} +func (m *NewReplaySession) XXX_Size() int { + return xxx_messageInfo_NewReplaySession.Size(m) +} +func (m *NewReplaySession) XXX_DiscardUnknown() { + xxx_messageInfo_NewReplaySession.DiscardUnknown(m) +} + +var xxx_messageInfo_NewReplaySession proto.InternalMessageInfo + +func (m *NewReplaySession) GetDevices() *ListDevicesResponse { + if m != nil { + return m.Devices + } + return nil +} + +func (m *NewReplaySession) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +type ReplayOp struct { + StartTimeUs float64 
`protobuf:"fixed64,31,opt,name=start_time_us,json=startTimeUs,proto3" json:"start_time_us,omitempty"` + EndTimeUs float64 `protobuf:"fixed64,32,opt,name=end_time_us,json=endTimeUs,proto3" json:"end_time_us,omitempty"` + // Types that are valid to be assigned to Op: + // *ReplayOp_CreateSession + // *ReplayOp_ExtendSession + // *ReplayOp_PartialRunSetup + // *ReplayOp_RunStep + // *ReplayOp_CloseSession + // *ReplayOp_ListDevices + // *ReplayOp_ResetRequest + // *ReplayOp_MakeCallable + // *ReplayOp_RunCallable + // *ReplayOp_ReleaseCallable + // *ReplayOp_NewReplaySession + Op isReplayOp_Op `protobuf_oneof:"op"` + // Types that are valid to be assigned to Response: + // *ReplayOp_CreateSessionResponse + // *ReplayOp_ExtendSessionResponse + // *ReplayOp_PartialRunSetupResponse + // *ReplayOp_RunStepResponse + // *ReplayOp_CloseSessionResponse + // *ReplayOp_ListDevicesResponse + // *ReplayOp_ResetRequestResponse + // *ReplayOp_MakeCallableResponse + // *ReplayOp_RunCallableResponse + // *ReplayOp_ReleaseCallableResponse + Response isReplayOp_Response `protobuf_oneof:"response"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReplayOp) Reset() { *m = ReplayOp{} } +func (m *ReplayOp) String() string { return proto.CompactTextString(m) } +func (*ReplayOp) ProtoMessage() {} +func (*ReplayOp) Descriptor() ([]byte, []int) { + return fileDescriptor_9ee59cdea95caad8, []int{1} +} + +func (m *ReplayOp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReplayOp.Unmarshal(m, b) +} +func (m *ReplayOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReplayOp.Marshal(b, m, deterministic) +} +func (m *ReplayOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplayOp.Merge(m, src) +} +func (m *ReplayOp) XXX_Size() int { + return xxx_messageInfo_ReplayOp.Size(m) +} +func (m *ReplayOp) XXX_DiscardUnknown() { + xxx_messageInfo_ReplayOp.DiscardUnknown(m) +} + 
+var xxx_messageInfo_ReplayOp proto.InternalMessageInfo + +func (m *ReplayOp) GetStartTimeUs() float64 { + if m != nil { + return m.StartTimeUs + } + return 0 +} + +func (m *ReplayOp) GetEndTimeUs() float64 { + if m != nil { + return m.EndTimeUs + } + return 0 +} + +type isReplayOp_Op interface { + isReplayOp_Op() +} + +type ReplayOp_CreateSession struct { + CreateSession *CreateSessionRequest `protobuf:"bytes,1,opt,name=create_session,json=createSession,proto3,oneof"` +} + +type ReplayOp_ExtendSession struct { + ExtendSession *ExtendSessionRequest `protobuf:"bytes,2,opt,name=extend_session,json=extendSession,proto3,oneof"` +} + +type ReplayOp_PartialRunSetup struct { + PartialRunSetup *PartialRunSetupRequest `protobuf:"bytes,3,opt,name=partial_run_setup,json=partialRunSetup,proto3,oneof"` +} + +type ReplayOp_RunStep struct { + RunStep *RunStepRequest `protobuf:"bytes,4,opt,name=run_step,json=runStep,proto3,oneof"` +} + +type ReplayOp_CloseSession struct { + CloseSession *CloseSessionRequest `protobuf:"bytes,5,opt,name=close_session,json=closeSession,proto3,oneof"` +} + +type ReplayOp_ListDevices struct { + ListDevices *ListDevicesRequest `protobuf:"bytes,6,opt,name=list_devices,json=listDevices,proto3,oneof"` +} + +type ReplayOp_ResetRequest struct { + ResetRequest *ResetRequest `protobuf:"bytes,7,opt,name=reset_request,json=resetRequest,proto3,oneof"` +} + +type ReplayOp_MakeCallable struct { + MakeCallable *MakeCallableRequest `protobuf:"bytes,8,opt,name=make_callable,json=makeCallable,proto3,oneof"` +} + +type ReplayOp_RunCallable struct { + RunCallable *RunCallableRequest `protobuf:"bytes,9,opt,name=run_callable,json=runCallable,proto3,oneof"` +} + +type ReplayOp_ReleaseCallable struct { + ReleaseCallable *ReleaseCallableRequest `protobuf:"bytes,10,opt,name=release_callable,json=releaseCallable,proto3,oneof"` +} + +type ReplayOp_NewReplaySession struct { + NewReplaySession *NewReplaySession 
`protobuf:"bytes,11,opt,name=new_replay_session,json=newReplaySession,proto3,oneof"` +} + +func (*ReplayOp_CreateSession) isReplayOp_Op() {} + +func (*ReplayOp_ExtendSession) isReplayOp_Op() {} + +func (*ReplayOp_PartialRunSetup) isReplayOp_Op() {} + +func (*ReplayOp_RunStep) isReplayOp_Op() {} + +func (*ReplayOp_CloseSession) isReplayOp_Op() {} + +func (*ReplayOp_ListDevices) isReplayOp_Op() {} + +func (*ReplayOp_ResetRequest) isReplayOp_Op() {} + +func (*ReplayOp_MakeCallable) isReplayOp_Op() {} + +func (*ReplayOp_RunCallable) isReplayOp_Op() {} + +func (*ReplayOp_ReleaseCallable) isReplayOp_Op() {} + +func (*ReplayOp_NewReplaySession) isReplayOp_Op() {} + +func (m *ReplayOp) GetOp() isReplayOp_Op { + if m != nil { + return m.Op + } + return nil +} + +func (m *ReplayOp) GetCreateSession() *CreateSessionRequest { + if x, ok := m.GetOp().(*ReplayOp_CreateSession); ok { + return x.CreateSession + } + return nil +} + +func (m *ReplayOp) GetExtendSession() *ExtendSessionRequest { + if x, ok := m.GetOp().(*ReplayOp_ExtendSession); ok { + return x.ExtendSession + } + return nil +} + +func (m *ReplayOp) GetPartialRunSetup() *PartialRunSetupRequest { + if x, ok := m.GetOp().(*ReplayOp_PartialRunSetup); ok { + return x.PartialRunSetup + } + return nil +} + +func (m *ReplayOp) GetRunStep() *RunStepRequest { + if x, ok := m.GetOp().(*ReplayOp_RunStep); ok { + return x.RunStep + } + return nil +} + +func (m *ReplayOp) GetCloseSession() *CloseSessionRequest { + if x, ok := m.GetOp().(*ReplayOp_CloseSession); ok { + return x.CloseSession + } + return nil +} + +func (m *ReplayOp) GetListDevices() *ListDevicesRequest { + if x, ok := m.GetOp().(*ReplayOp_ListDevices); ok { + return x.ListDevices + } + return nil +} + +func (m *ReplayOp) GetResetRequest() *ResetRequest { + if x, ok := m.GetOp().(*ReplayOp_ResetRequest); ok { + return x.ResetRequest + } + return nil +} + +func (m *ReplayOp) GetMakeCallable() *MakeCallableRequest { + if x, ok := m.GetOp().(*ReplayOp_MakeCallable); ok 
{ + return x.MakeCallable + } + return nil +} + +func (m *ReplayOp) GetRunCallable() *RunCallableRequest { + if x, ok := m.GetOp().(*ReplayOp_RunCallable); ok { + return x.RunCallable + } + return nil +} + +func (m *ReplayOp) GetReleaseCallable() *ReleaseCallableRequest { + if x, ok := m.GetOp().(*ReplayOp_ReleaseCallable); ok { + return x.ReleaseCallable + } + return nil +} + +func (m *ReplayOp) GetNewReplaySession() *NewReplaySession { + if x, ok := m.GetOp().(*ReplayOp_NewReplaySession); ok { + return x.NewReplaySession + } + return nil +} + +type isReplayOp_Response interface { + isReplayOp_Response() +} + +type ReplayOp_CreateSessionResponse struct { + CreateSessionResponse *CreateSessionResponse `protobuf:"bytes,21,opt,name=create_session_response,json=createSessionResponse,proto3,oneof"` +} + +type ReplayOp_ExtendSessionResponse struct { + ExtendSessionResponse *ExtendSessionResponse `protobuf:"bytes,22,opt,name=extend_session_response,json=extendSessionResponse,proto3,oneof"` +} + +type ReplayOp_PartialRunSetupResponse struct { + PartialRunSetupResponse *PartialRunSetupResponse `protobuf:"bytes,23,opt,name=partial_run_setup_response,json=partialRunSetupResponse,proto3,oneof"` +} + +type ReplayOp_RunStepResponse struct { + RunStepResponse *RunStepResponse `protobuf:"bytes,24,opt,name=run_step_response,json=runStepResponse,proto3,oneof"` +} + +type ReplayOp_CloseSessionResponse struct { + CloseSessionResponse *CloseSessionResponse `protobuf:"bytes,25,opt,name=close_session_response,json=closeSessionResponse,proto3,oneof"` +} + +type ReplayOp_ListDevicesResponse struct { + ListDevicesResponse *ListDevicesResponse `protobuf:"bytes,26,opt,name=list_devices_response,json=listDevicesResponse,proto3,oneof"` +} + +type ReplayOp_ResetRequestResponse struct { + ResetRequestResponse *ResetResponse `protobuf:"bytes,27,opt,name=reset_request_response,json=resetRequestResponse,proto3,oneof"` +} + +type ReplayOp_MakeCallableResponse struct { + MakeCallableResponse 
*MakeCallableResponse `protobuf:"bytes,28,opt,name=make_callable_response,json=makeCallableResponse,proto3,oneof"` +} + +type ReplayOp_RunCallableResponse struct { + RunCallableResponse *RunCallableResponse `protobuf:"bytes,29,opt,name=run_callable_response,json=runCallableResponse,proto3,oneof"` +} + +type ReplayOp_ReleaseCallableResponse struct { + ReleaseCallableResponse *ReleaseCallableResponse `protobuf:"bytes,30,opt,name=release_callable_response,json=releaseCallableResponse,proto3,oneof"` +} + +func (*ReplayOp_CreateSessionResponse) isReplayOp_Response() {} + +func (*ReplayOp_ExtendSessionResponse) isReplayOp_Response() {} + +func (*ReplayOp_PartialRunSetupResponse) isReplayOp_Response() {} + +func (*ReplayOp_RunStepResponse) isReplayOp_Response() {} + +func (*ReplayOp_CloseSessionResponse) isReplayOp_Response() {} + +func (*ReplayOp_ListDevicesResponse) isReplayOp_Response() {} + +func (*ReplayOp_ResetRequestResponse) isReplayOp_Response() {} + +func (*ReplayOp_MakeCallableResponse) isReplayOp_Response() {} + +func (*ReplayOp_RunCallableResponse) isReplayOp_Response() {} + +func (*ReplayOp_ReleaseCallableResponse) isReplayOp_Response() {} + +func (m *ReplayOp) GetResponse() isReplayOp_Response { + if m != nil { + return m.Response + } + return nil +} + +func (m *ReplayOp) GetCreateSessionResponse() *CreateSessionResponse { + if x, ok := m.GetResponse().(*ReplayOp_CreateSessionResponse); ok { + return x.CreateSessionResponse + } + return nil +} + +func (m *ReplayOp) GetExtendSessionResponse() *ExtendSessionResponse { + if x, ok := m.GetResponse().(*ReplayOp_ExtendSessionResponse); ok { + return x.ExtendSessionResponse + } + return nil +} + +func (m *ReplayOp) GetPartialRunSetupResponse() *PartialRunSetupResponse { + if x, ok := m.GetResponse().(*ReplayOp_PartialRunSetupResponse); ok { + return x.PartialRunSetupResponse + } + return nil +} + +func (m *ReplayOp) GetRunStepResponse() *RunStepResponse { + if x, ok := m.GetResponse().(*ReplayOp_RunStepResponse); 
ok { + return x.RunStepResponse + } + return nil +} + +func (m *ReplayOp) GetCloseSessionResponse() *CloseSessionResponse { + if x, ok := m.GetResponse().(*ReplayOp_CloseSessionResponse); ok { + return x.CloseSessionResponse + } + return nil +} + +func (m *ReplayOp) GetListDevicesResponse() *ListDevicesResponse { + if x, ok := m.GetResponse().(*ReplayOp_ListDevicesResponse); ok { + return x.ListDevicesResponse + } + return nil +} + +func (m *ReplayOp) GetResetRequestResponse() *ResetResponse { + if x, ok := m.GetResponse().(*ReplayOp_ResetRequestResponse); ok { + return x.ResetRequestResponse + } + return nil +} + +func (m *ReplayOp) GetMakeCallableResponse() *MakeCallableResponse { + if x, ok := m.GetResponse().(*ReplayOp_MakeCallableResponse); ok { + return x.MakeCallableResponse + } + return nil +} + +func (m *ReplayOp) GetRunCallableResponse() *RunCallableResponse { + if x, ok := m.GetResponse().(*ReplayOp_RunCallableResponse); ok { + return x.RunCallableResponse + } + return nil +} + +func (m *ReplayOp) GetReleaseCallableResponse() *ReleaseCallableResponse { + if x, ok := m.GetResponse().(*ReplayOp_ReleaseCallableResponse); ok { + return x.ReleaseCallableResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*ReplayOp) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ReplayOp_CreateSession)(nil), + (*ReplayOp_ExtendSession)(nil), + (*ReplayOp_PartialRunSetup)(nil), + (*ReplayOp_RunStep)(nil), + (*ReplayOp_CloseSession)(nil), + (*ReplayOp_ListDevices)(nil), + (*ReplayOp_ResetRequest)(nil), + (*ReplayOp_MakeCallable)(nil), + (*ReplayOp_RunCallable)(nil), + (*ReplayOp_ReleaseCallable)(nil), + (*ReplayOp_NewReplaySession)(nil), + (*ReplayOp_CreateSessionResponse)(nil), + (*ReplayOp_ExtendSessionResponse)(nil), + (*ReplayOp_PartialRunSetupResponse)(nil), + (*ReplayOp_RunStepResponse)(nil), + (*ReplayOp_CloseSessionResponse)(nil), + (*ReplayOp_ListDevicesResponse)(nil), + (*ReplayOp_ResetRequestResponse)(nil), + (*ReplayOp_MakeCallableResponse)(nil), + (*ReplayOp_RunCallableResponse)(nil), + (*ReplayOp_ReleaseCallableResponse)(nil), + } +} + +func init() { + proto.RegisterType((*NewReplaySession)(nil), "tensorflow.NewReplaySession") + proto.RegisterType((*ReplayOp)(nil), "tensorflow.ReplayOp") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/replay_log.proto", fileDescriptor_9ee59cdea95caad8) +} + +var fileDescriptor_9ee59cdea95caad8 = []byte{ + // 684 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0x4f, 0x4f, 0xdb, 0x4a, + 0x14, 0xc5, 0x31, 0xf0, 0x20, 0xdc, 0x10, 0xfe, 0xcc, 0x7b, 0x80, 0x09, 0x3c, 0xc8, 0xcb, 0x13, + 0x12, 0xdd, 0x80, 0xd4, 0x2e, 0xaa, 0xae, 0x2a, 0x91, 0xb6, 0x0a, 0x12, 0x2d, 0x74, 0x28, 0x52, + 0xa5, 0x2e, 0xac, 0x89, 0x73, 0x69, 0x2d, 0x26, 0xb6, 0x3b, 0x33, 0x2e, 0xed, 0x37, 0xef, 0xa2, + 0x8b, 0xca, 0x63, 0x3b, 0x33, 0xce, 0x18, 0xd4, 0x65, 0xce, 0x3d, 0xfe, 0xcd, 0xbd, 0xd7, 0x73, + 0x1c, 0x78, 0xa2, 0x30, 0x96, 0x89, 0xb8, 0xe5, 0xc9, 0xfd, 0x69, 0x98, 0x08, 0x3c, 0x4d, 0x45, + 0xa2, 0x92, 0x51, 0x76, 0x7b, 0x2a, 0x30, 0xe5, 0xec, 0x47, 0xc0, 0x93, 0xcf, 0x27, 0x5a, 0x23, + 0x60, 0xac, 0xdd, 0xa3, 0x07, 0x1f, 0x9b, 0x30, 0xa9, 0x50, 0x14, 0x8f, 
0xf4, 0x15, 0x6c, 0xbc, + 0xc3, 0x7b, 0xaa, 0x49, 0xd7, 0x28, 0x65, 0x94, 0xc4, 0xe4, 0x05, 0x2c, 0x8f, 0xf1, 0x5b, 0x14, + 0xa2, 0xf4, 0xbd, 0x9e, 0x77, 0xdc, 0x7e, 0x7a, 0x78, 0x62, 0x60, 0x27, 0x17, 0x91, 0x54, 0xaf, + 0x8a, 0x32, 0x45, 0x99, 0x26, 0xb1, 0x44, 0x5a, 0xf9, 0xc9, 0x11, 0xac, 0xc9, 0x82, 0x12, 0x7c, + 0x61, 0xf1, 0x98, 0xa3, 0x3f, 0xdf, 0xf3, 0x8e, 0x57, 0x68, 0xa7, 0x54, 0x87, 0x5a, 0xec, 0xff, + 0xea, 0x40, 0xab, 0x38, 0xf3, 0x32, 0x25, 0x7d, 0xe8, 0x48, 0xc5, 0x84, 0x0a, 0x54, 0x34, 0xc1, + 0x20, 0x93, 0xfe, 0x61, 0xcf, 0x3b, 0xf6, 0x68, 0x5b, 0x8b, 0x1f, 0xa2, 0x09, 0xde, 0x48, 0x72, + 0x00, 0x6d, 0x8c, 0xc7, 0x53, 0x47, 0x4f, 0x3b, 0x56, 0x30, 0x1e, 0x97, 0xf5, 0x73, 0x58, 0x0b, + 0x05, 0x32, 0x85, 0x41, 0x79, 0x50, 0xd9, 0x79, 0xcf, 0xee, 0x7c, 0xa0, 0x1d, 0xe5, 0x94, 0x14, + 0xbf, 0x66, 0x28, 0xd5, 0x70, 0x8e, 0x76, 0x42, 0x5b, 0xcf, 0x51, 0xf8, 0x5d, 0xe5, 0xa7, 0x55, + 0xa8, 0x79, 0x17, 0xf5, 0x5a, 0x3b, 0x5c, 0x14, 0xda, 0x3a, 0xb9, 0x82, 0xcd, 0x94, 0x09, 0x15, + 0x31, 0x1e, 0x88, 0x2c, 0x0e, 0x24, 0xaa, 0x2c, 0xf5, 0x17, 0x34, 0xad, 0x6f, 0xd3, 0xae, 0x0a, + 0x13, 0xcd, 0xe2, 0xeb, 0xdc, 0x62, 0x78, 0xeb, 0x69, 0xbd, 0x42, 0x9e, 0x43, 0x4b, 0x93, 0x14, + 0xa6, 0xfe, 0xa2, 0x06, 0x75, 0x6d, 0x50, 0xee, 0x53, 0x68, 0x01, 0x96, 0x45, 0xa1, 0x90, 0x37, + 0xd0, 0x09, 0x79, 0x22, 0xcd, 0x7e, 0xfe, 0x72, 0xdf, 0xec, 0x20, 0x37, 0x38, 0x33, 0xad, 0x86, + 0x96, 0x4c, 0x06, 0xb0, 0xca, 0x23, 0xa9, 0x82, 0xea, 0x82, 0x2c, 0x69, 0xcc, 0xc1, 0x83, 0x17, + 0xa4, 0xa2, 0xb4, 0xb9, 0x51, 0xc9, 0x4b, 0xe8, 0x08, 0x94, 0xa8, 0x02, 0x51, 0xd4, 0xfd, 0x65, + 0x4d, 0xf1, 0x6b, 0xa3, 0xe4, 0x06, 0xab, 0x0b, 0x61, 0xfd, 0xce, 0xa7, 0x99, 0xb0, 0x3b, 0x0c, + 0x42, 0xc6, 0x39, 0x1b, 0x71, 0xf4, 0x5b, 0xee, 0x34, 0x6f, 0xd9, 0x1d, 0x0e, 0xca, 0xba, 0xc5, + 0x99, 0x58, 0x72, 0x3e, 0x4d, 0xbe, 0xce, 0x29, 0x66, 0xc5, 0x9d, 0x86, 0x66, 0xb1, 0x4b, 0x69, + 0x0b, 0xa3, 0x92, 0x4b, 0xd8, 0x10, 0xc8, 0x91, 0x49, 0xab, 0x1f, 0x70, 0x5f, 0x32, 0x2d, 0x3c, + 0x2e, 0x6c, 
0x5d, 0xd4, 0x2b, 0xe4, 0x02, 0x48, 0x8c, 0xf7, 0x41, 0x19, 0xef, 0xea, 0x85, 0xb5, + 0x35, 0x72, 0xdf, 0x46, 0xce, 0x26, 0x77, 0x38, 0x47, 0x37, 0xe2, 0xd9, 0x34, 0x7f, 0x82, 0x9d, + 0x7a, 0x34, 0x02, 0x51, 0xc6, 0xd6, 0xdf, 0xd2, 0xc8, 0xff, 0x1e, 0xc9, 0x48, 0x61, 0x1c, 0x7a, + 0x74, 0x2b, 0x6c, 0x2a, 0xe4, 0xf0, 0x7a, 0x58, 0x0c, 0x7c, 0xdb, 0x85, 0xcf, 0xa4, 0xc6, 0xc0, + 0xb1, 0xa9, 0x40, 0x46, 0xd0, 0x75, 0xe2, 0x63, 0xf8, 0x3b, 0x9a, 0xff, 0xff, 0xa3, 0x39, 0x9a, + 0x9e, 0xb0, 0x93, 0x36, 0x97, 0xc8, 0x39, 0x6c, 0x56, 0x81, 0x32, 0x68, 0x5f, 0xa3, 0xf7, 0x1a, + 0x93, 0x35, 0x45, 0xae, 0x8b, 0xba, 0x44, 0x3e, 0xc2, 0x76, 0x2d, 0x62, 0x86, 0xb7, 0xdb, 0xf0, + 0x2d, 0xaa, 0x65, 0x6d, 0x0a, 0xfd, 0x27, 0x6c, 0xd0, 0xc9, 0x0d, 0x6c, 0xd9, 0xa1, 0x33, 0xe0, + 0xee, 0x1f, 0x7d, 0x9e, 0x87, 0x1e, 0xfd, 0x9b, 0xbb, 0x32, 0x79, 0x0f, 0xdb, 0xb5, 0x18, 0x1a, + 0xee, 0x9e, 0xe6, 0xee, 0x36, 0xe4, 0xd1, 0x74, 0x6a, 0x07, 0xd2, 0xde, 0x41, 0x2d, 0x98, 0x06, + 0xb9, 0xef, 0xee, 0xa0, 0x9e, 0x50, 0x43, 0x9e, 0x34, 0xe8, 0xf9, 0x0e, 0xec, 0xa8, 0x1a, 0xf0, + 0xbf, 0xee, 0x0e, 0x6a, 0x99, 0x35, 0x3b, 0x10, 0xae, 0x4c, 0x18, 0xec, 0xce, 0x86, 0xd7, 0xa0, + 0x0f, 0xdc, 0x2b, 0xe6, 0xa4, 0xd8, 0x5c, 0x31, 0xd1, 0x5c, 0x3a, 0x5b, 0x84, 0xf9, 0x24, 0x3d, + 0x03, 0x68, 0x55, 0xdc, 0xb3, 0x85, 0x9f, 0x9e, 0x37, 0x5a, 0xd2, 0x7f, 0xc0, 0xcf, 0x7e, 0x07, + 0x00, 0x00, 0xff, 0xff, 0x28, 0x16, 0x50, 0xfb, 0xe0, 0x07, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/replay_log.proto b/executor/proto/tensorflow/core/protobuf/replay_log.proto new file mode 100644 index 0000000000..5506ec0c8e --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/replay_log.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/core/protobuf/master.proto"; + +option cc_enable_arenas = true; + +// Records the creation of a new replay session. We record the device listing +// here to capture the state of the cluster. 
+message NewReplaySession { + ListDevicesResponse devices = 1; + string session_handle = 2; +} + +message ReplayOp { + double start_time_us = 31; + double end_time_us = 32; + + oneof op { + CreateSessionRequest create_session = 1; + ExtendSessionRequest extend_session = 2; + PartialRunSetupRequest partial_run_setup = 3; + RunStepRequest run_step = 4; + CloseSessionRequest close_session = 5; + ListDevicesRequest list_devices = 6; + ResetRequest reset_request = 7; + MakeCallableRequest make_callable = 8; + RunCallableRequest run_callable = 9; + ReleaseCallableRequest release_callable = 10; + NewReplaySession new_replay_session = 11; + } + + oneof response { + CreateSessionResponse create_session_response = 21; + ExtendSessionResponse extend_session_response = 22; + PartialRunSetupResponse partial_run_setup_response = 23; + RunStepResponse run_step_response = 24; + CloseSessionResponse close_session_response = 25; + ListDevicesResponse list_devices_response = 26; + ResetResponse reset_request_response = 27; + MakeCallableResponse make_callable_response = 28; + RunCallableResponse run_callable_response = 29; + ReleaseCallableResponse release_callable_response = 30; + } +} diff --git a/executor/proto/tensorflow/core/protobuf/rewriter_config.pb.go b/executor/proto/tensorflow/core/protobuf/rewriter_config.pb.go new file mode 100644 index 0000000000..e6f2d1327e --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/rewriter_config.pb.go @@ -0,0 +1,681 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/rewriter_config.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type RewriterConfig_Toggle int32 + +const ( + RewriterConfig_DEFAULT RewriterConfig_Toggle = 0 + RewriterConfig_ON RewriterConfig_Toggle = 1 + RewriterConfig_OFF RewriterConfig_Toggle = 2 + // Enable some aggressive optimizations that use assumptions that TF graphs + // may break. For example, assume the shape of a placeholder matches its + // actual feed. + RewriterConfig_AGGRESSIVE RewriterConfig_Toggle = 3 +) + +var RewriterConfig_Toggle_name = map[int32]string{ + 0: "DEFAULT", + 1: "ON", + 2: "OFF", + 3: "AGGRESSIVE", +} + +var RewriterConfig_Toggle_value = map[string]int32{ + "DEFAULT": 0, + "ON": 1, + "OFF": 2, + "AGGRESSIVE": 3, +} + +func (x RewriterConfig_Toggle) String() string { + return proto.EnumName(RewriterConfig_Toggle_name, int32(x)) +} + +func (RewriterConfig_Toggle) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1dd7de60bf190bbb, []int{2, 0} +} + +// Enum controlling the number of times to run optimizers. The default is to +// run them twice. 
+type RewriterConfig_NumIterationsType int32 + +const ( + RewriterConfig_DEFAULT_NUM_ITERS RewriterConfig_NumIterationsType = 0 + RewriterConfig_ONE RewriterConfig_NumIterationsType = 1 + RewriterConfig_TWO RewriterConfig_NumIterationsType = 2 +) + +var RewriterConfig_NumIterationsType_name = map[int32]string{ + 0: "DEFAULT_NUM_ITERS", + 1: "ONE", + 2: "TWO", +} + +var RewriterConfig_NumIterationsType_value = map[string]int32{ + "DEFAULT_NUM_ITERS": 0, + "ONE": 1, + "TWO": 2, +} + +func (x RewriterConfig_NumIterationsType) String() string { + return proto.EnumName(RewriterConfig_NumIterationsType_name, int32(x)) +} + +func (RewriterConfig_NumIterationsType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1dd7de60bf190bbb, []int{2, 1} +} + +type RewriterConfig_MemOptType int32 + +const ( + // The default setting (SCHEDULING and SWAPPING HEURISTICS only) + RewriterConfig_DEFAULT_MEM_OPT RewriterConfig_MemOptType = 0 + // Disabled in the meta-optimizer. + RewriterConfig_NO_MEM_OPT RewriterConfig_MemOptType = 1 + // Driven by manual op-level annotations. + RewriterConfig_MANUAL RewriterConfig_MemOptType = 2 + // Swapping heuristic will move a tensor from the GPU to the CPU and move + // it back when needed to reduce peak memory usage. + RewriterConfig_SWAPPING_HEURISTICS RewriterConfig_MemOptType = 4 + // Recomputation heuristics will recompute ops (such as Relu activation) + // during backprop instead of storing them, reducing peak memory usage. + RewriterConfig_RECOMPUTATION_HEURISTICS RewriterConfig_MemOptType = 5 + // Scheduling will split big ops such as AddN and try to enforce a schedule + // of the new computations that decreases peak memory usage. + RewriterConfig_SCHEDULING_HEURISTICS RewriterConfig_MemOptType = 6 + // Use any combination of swapping and recomputation heuristics. 
+ RewriterConfig_HEURISTICS RewriterConfig_MemOptType = 3 +) + +var RewriterConfig_MemOptType_name = map[int32]string{ + 0: "DEFAULT_MEM_OPT", + 1: "NO_MEM_OPT", + 2: "MANUAL", + 4: "SWAPPING_HEURISTICS", + 5: "RECOMPUTATION_HEURISTICS", + 6: "SCHEDULING_HEURISTICS", + 3: "HEURISTICS", +} + +var RewriterConfig_MemOptType_value = map[string]int32{ + "DEFAULT_MEM_OPT": 0, + "NO_MEM_OPT": 1, + "MANUAL": 2, + "SWAPPING_HEURISTICS": 4, + "RECOMPUTATION_HEURISTICS": 5, + "SCHEDULING_HEURISTICS": 6, + "HEURISTICS": 3, +} + +func (x RewriterConfig_MemOptType) String() string { + return proto.EnumName(RewriterConfig_MemOptType_name, int32(x)) +} + +func (RewriterConfig_MemOptType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1dd7de60bf190bbb, []int{2, 2} +} + +type AutoParallelOptions struct { + Enable bool `protobuf:"varint,1,opt,name=enable,proto3" json:"enable,omitempty"` + NumReplicas int32 `protobuf:"varint,2,opt,name=num_replicas,json=numReplicas,proto3" json:"num_replicas,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AutoParallelOptions) Reset() { *m = AutoParallelOptions{} } +func (m *AutoParallelOptions) String() string { return proto.CompactTextString(m) } +func (*AutoParallelOptions) ProtoMessage() {} +func (*AutoParallelOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_1dd7de60bf190bbb, []int{0} +} + +func (m *AutoParallelOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AutoParallelOptions.Unmarshal(m, b) +} +func (m *AutoParallelOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AutoParallelOptions.Marshal(b, m, deterministic) +} +func (m *AutoParallelOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_AutoParallelOptions.Merge(m, src) +} +func (m *AutoParallelOptions) XXX_Size() int { + return xxx_messageInfo_AutoParallelOptions.Size(m) +} +func (m *AutoParallelOptions) 
XXX_DiscardUnknown() { + xxx_messageInfo_AutoParallelOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_AutoParallelOptions proto.InternalMessageInfo + +func (m *AutoParallelOptions) GetEnable() bool { + if m != nil { + return m.Enable + } + return false +} + +func (m *AutoParallelOptions) GetNumReplicas() int32 { + if m != nil { + return m.NumReplicas + } + return 0 +} + +type ScopedAllocatorOptions struct { + // If present, only perform optimization for these ops. + EnableOp []string `protobuf:"bytes,1,rep,name=enable_op,json=enableOp,proto3" json:"enable_op,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ScopedAllocatorOptions) Reset() { *m = ScopedAllocatorOptions{} } +func (m *ScopedAllocatorOptions) String() string { return proto.CompactTextString(m) } +func (*ScopedAllocatorOptions) ProtoMessage() {} +func (*ScopedAllocatorOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_1dd7de60bf190bbb, []int{1} +} + +func (m *ScopedAllocatorOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ScopedAllocatorOptions.Unmarshal(m, b) +} +func (m *ScopedAllocatorOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ScopedAllocatorOptions.Marshal(b, m, deterministic) +} +func (m *ScopedAllocatorOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopedAllocatorOptions.Merge(m, src) +} +func (m *ScopedAllocatorOptions) XXX_Size() int { + return xxx_messageInfo_ScopedAllocatorOptions.Size(m) +} +func (m *ScopedAllocatorOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ScopedAllocatorOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ScopedAllocatorOptions proto.InternalMessageInfo + +func (m *ScopedAllocatorOptions) GetEnableOp() []string { + if m != nil { + return m.EnableOp + } + return nil +} + +type RewriterConfig struct { + // Optimize tensor layouts (default is ON) + // e.g. 
This will try to use NCHW layout on GPU which is faster. + LayoutOptimizer RewriterConfig_Toggle `protobuf:"varint,1,opt,name=layout_optimizer,json=layoutOptimizer,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"layout_optimizer,omitempty"` + // Fold constants (default is ON) + // Statically infer the value of tensors when possible, and materialize the + // result using constants. + ConstantFolding RewriterConfig_Toggle `protobuf:"varint,3,opt,name=constant_folding,json=constantFolding,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"constant_folding,omitempty"` + // Shape optimizations (default is ON) + // Simplify computations made on shapes. + ShapeOptimization RewriterConfig_Toggle `protobuf:"varint,13,opt,name=shape_optimization,json=shapeOptimization,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"shape_optimization,omitempty"` + // Remapping (default is ON) + // Remap subgraphs onto more efficient implementations. + Remapping RewriterConfig_Toggle `protobuf:"varint,14,opt,name=remapping,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"remapping,omitempty"` + // Arithmetic optimizations (default is ON) + // e.g. Simplify arithmetic ops; merge ops with same value (like constants). + ArithmeticOptimization RewriterConfig_Toggle `protobuf:"varint,7,opt,name=arithmetic_optimization,json=arithmeticOptimization,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"arithmetic_optimization,omitempty"` + // Control dependency optimizations (default is ON). + // Remove redundant control dependencies, which may enable other optimization. + DependencyOptimization RewriterConfig_Toggle `protobuf:"varint,8,opt,name=dependency_optimization,json=dependencyOptimization,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"dependency_optimization,omitempty"` + // Loop optimizations (default is ON). 
+ LoopOptimization RewriterConfig_Toggle `protobuf:"varint,9,opt,name=loop_optimization,json=loopOptimization,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"loop_optimization,omitempty"` + // Function optimizations (default is ON). + FunctionOptimization RewriterConfig_Toggle `protobuf:"varint,10,opt,name=function_optimization,json=functionOptimization,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"function_optimization,omitempty"` + // Strips debug-related nodes from the graph (off by default). + DebugStripper RewriterConfig_Toggle `protobuf:"varint,11,opt,name=debug_stripper,json=debugStripper,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"debug_stripper,omitempty"` + // If true, don't remove unnecessary ops from the graph + DisableModelPruning bool `protobuf:"varint,2,opt,name=disable_model_pruning,json=disableModelPruning,proto3" json:"disable_model_pruning,omitempty"` + // Try to allocate some independent Op outputs contiguously in order to + // merge or eliminate downstream Ops (off by default). + ScopedAllocatorOptimization RewriterConfig_Toggle `protobuf:"varint,15,opt,name=scoped_allocator_optimization,json=scopedAllocatorOptimization,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"scoped_allocator_optimization,omitempty"` + // Force small ops onto the CPU (default is OFF). + PinToHostOptimization RewriterConfig_Toggle `protobuf:"varint,18,opt,name=pin_to_host_optimization,json=pinToHostOptimization,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"pin_to_host_optimization,omitempty"` + // Enable the swap of kernel implementations based on the device placement + // (default is ON). + ImplementationSelector RewriterConfig_Toggle `protobuf:"varint,22,opt,name=implementation_selector,json=implementationSelector,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"implementation_selector,omitempty"` + // Optimize data types (default is OFF). + // e.g., This will try to use float16 on GPU which is faster. 
+ // Note that this can change the numerical stability of the graph and may + // require the use of loss scaling to maintain model convergence. + AutoMixedPrecision RewriterConfig_Toggle `protobuf:"varint,23,opt,name=auto_mixed_precision,json=autoMixedPrecision,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"auto_mixed_precision,omitempty"` + // Disable the entire meta optimizer (off by default). + DisableMetaOptimizer bool `protobuf:"varint,19,opt,name=disable_meta_optimizer,json=disableMetaOptimizer,proto3" json:"disable_meta_optimizer,omitempty"` + // Controls how many times we run the optimizers in meta optimizer (default + // is once). + MetaOptimizerIterations RewriterConfig_NumIterationsType `protobuf:"varint,12,opt,name=meta_optimizer_iterations,json=metaOptimizerIterations,proto3,enum=tensorflow.RewriterConfig_NumIterationsType" json:"meta_optimizer_iterations,omitempty"` + // The minimum number of nodes in a graph to optimizer. For smaller graphs, + // optimization is skipped. + // 0 means the system picks an appropriate number. + // < 0 means do not skip optimization. + MinGraphNodes int32 `protobuf:"varint,17,opt,name=min_graph_nodes,json=minGraphNodes,proto3" json:"min_graph_nodes,omitempty"` + // Configures memory optimization passes through the meta-optimizer. Has no + // effect on manually requested memory optimization passes in the optimizers + // field. + MemoryOptimization RewriterConfig_MemOptType `protobuf:"varint,4,opt,name=memory_optimization,json=memoryOptimization,proto3,enum=tensorflow.RewriterConfig_MemOptType" json:"memory_optimization,omitempty"` + // A node name scope for node names which are valid outputs of recompuations. + // Inputs to nodes that match this scope may be recomputed (subject either to + // manual annotation of those input nodes or to manual annotation and + // heuristics depending on memory_optimization), but the nodes themselves will + // not be recomputed. 
This matches any sub-scopes as well, meaning the scope + // can appear not just as a top-level scope. For example, if the value is + // "gradients/", the default, it will match node name "gradients/foo", + // "foo/gradients/bar", but not "foo_gradients/" + MemoryOptimizerTargetNodeNameScope string `protobuf:"bytes,6,opt,name=memory_optimizer_target_node_name_scope,json=memoryOptimizerTargetNodeNameScope,proto3" json:"memory_optimizer_target_node_name_scope,omitempty"` + // Maximum number of milliseconds to spend optimizing a single graph before + // timing out. If equal to 0 the system picks a default (currently 5 minutes). + // If less than 0 the optimizer will never time out. + MetaOptimizerTimeoutMs int64 `protobuf:"varint,20,opt,name=meta_optimizer_timeout_ms,json=metaOptimizerTimeoutMs,proto3" json:"meta_optimizer_timeout_ms,omitempty"` + // Configures AutoParallel optimization passes either through the + // meta-optimizer or when manually specified through the optimizers field. + AutoParallel *AutoParallelOptions `protobuf:"bytes,5,opt,name=auto_parallel,json=autoParallel,proto3" json:"auto_parallel,omitempty"` + // If true, any optimization pass failing will cause the MetaOptimizer to + // stop with an error. By default - or when set to false, failing passes are + // skipped silently. + FailOnOptimizerErrors bool `protobuf:"varint,21,opt,name=fail_on_optimizer_errors,json=failOnOptimizerErrors,proto3" json:"fail_on_optimizer_errors,omitempty"` + ScopedAllocatorOpts *ScopedAllocatorOptions `protobuf:"bytes,16,opt,name=scoped_allocator_opts,json=scopedAllocatorOpts,proto3" json:"scoped_allocator_opts,omitempty"` + // If non-empty, will use this as an alternative way to specify a list of + // optimizations to turn on and the order of the optimizations (replacing the + // meta-optimizer). 
+ // + // Of the RewriterConfig options, only the AutoParallel configuration options + // (the auto_parallel field) apply to manually requested optimization passes + // ("autoparallel"). Memory optimization passes ("memory") invoked here are + // not configurable (in contrast to memory optimization passes through the + // meta-optimizer) and act only on manual op annotations. + // + // Custom optimizers (see custom_optimizers) that are not part of this + // schedule will be run after - in the order that they were specified. + Optimizers []string `protobuf:"bytes,100,rep,name=optimizers,proto3" json:"optimizers,omitempty"` + // list of CustomGraphOptimizers to apply. + CustomOptimizers []*RewriterConfig_CustomGraphOptimizer `protobuf:"bytes,200,rep,name=custom_optimizers,json=customOptimizers,proto3" json:"custom_optimizers,omitempty"` + // VerifierConfig specifying the verifiers to be run after every optimizer. + InterOptimizerVerifierConfig *VerifierConfig `protobuf:"bytes,300,opt,name=inter_optimizer_verifier_config,json=interOptimizerVerifierConfig,proto3" json:"inter_optimizer_verifier_config,omitempty"` + // VerifierConfig specifying the verifiers to be run at the end, after all + // optimizers have run. 
+ PostOptimizationVerifierConfig *VerifierConfig `protobuf:"bytes,301,opt,name=post_optimization_verifier_config,json=postOptimizationVerifierConfig,proto3" json:"post_optimization_verifier_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RewriterConfig) Reset() { *m = RewriterConfig{} } +func (m *RewriterConfig) String() string { return proto.CompactTextString(m) } +func (*RewriterConfig) ProtoMessage() {} +func (*RewriterConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1dd7de60bf190bbb, []int{2} +} + +func (m *RewriterConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RewriterConfig.Unmarshal(m, b) +} +func (m *RewriterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RewriterConfig.Marshal(b, m, deterministic) +} +func (m *RewriterConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RewriterConfig.Merge(m, src) +} +func (m *RewriterConfig) XXX_Size() int { + return xxx_messageInfo_RewriterConfig.Size(m) +} +func (m *RewriterConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RewriterConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RewriterConfig proto.InternalMessageInfo + +func (m *RewriterConfig) GetLayoutOptimizer() RewriterConfig_Toggle { + if m != nil { + return m.LayoutOptimizer + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetConstantFolding() RewriterConfig_Toggle { + if m != nil { + return m.ConstantFolding + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetShapeOptimization() RewriterConfig_Toggle { + if m != nil { + return m.ShapeOptimization + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetRemapping() RewriterConfig_Toggle { + if m != nil { + return m.Remapping + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetArithmeticOptimization() RewriterConfig_Toggle { + if m != nil { + return 
m.ArithmeticOptimization + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetDependencyOptimization() RewriterConfig_Toggle { + if m != nil { + return m.DependencyOptimization + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetLoopOptimization() RewriterConfig_Toggle { + if m != nil { + return m.LoopOptimization + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetFunctionOptimization() RewriterConfig_Toggle { + if m != nil { + return m.FunctionOptimization + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetDebugStripper() RewriterConfig_Toggle { + if m != nil { + return m.DebugStripper + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetDisableModelPruning() bool { + if m != nil { + return m.DisableModelPruning + } + return false +} + +func (m *RewriterConfig) GetScopedAllocatorOptimization() RewriterConfig_Toggle { + if m != nil { + return m.ScopedAllocatorOptimization + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetPinToHostOptimization() RewriterConfig_Toggle { + if m != nil { + return m.PinToHostOptimization + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetImplementationSelector() RewriterConfig_Toggle { + if m != nil { + return m.ImplementationSelector + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetAutoMixedPrecision() RewriterConfig_Toggle { + if m != nil { + return m.AutoMixedPrecision + } + return RewriterConfig_DEFAULT +} + +func (m *RewriterConfig) GetDisableMetaOptimizer() bool { + if m != nil { + return m.DisableMetaOptimizer + } + return false +} + +func (m *RewriterConfig) GetMetaOptimizerIterations() RewriterConfig_NumIterationsType { + if m != nil { + return m.MetaOptimizerIterations + } + return RewriterConfig_DEFAULT_NUM_ITERS +} + +func (m *RewriterConfig) GetMinGraphNodes() int32 { + if m != nil { + return m.MinGraphNodes + } + return 0 +} + +func (m *RewriterConfig) GetMemoryOptimization() 
RewriterConfig_MemOptType { + if m != nil { + return m.MemoryOptimization + } + return RewriterConfig_DEFAULT_MEM_OPT +} + +func (m *RewriterConfig) GetMemoryOptimizerTargetNodeNameScope() string { + if m != nil { + return m.MemoryOptimizerTargetNodeNameScope + } + return "" +} + +func (m *RewriterConfig) GetMetaOptimizerTimeoutMs() int64 { + if m != nil { + return m.MetaOptimizerTimeoutMs + } + return 0 +} + +func (m *RewriterConfig) GetAutoParallel() *AutoParallelOptions { + if m != nil { + return m.AutoParallel + } + return nil +} + +func (m *RewriterConfig) GetFailOnOptimizerErrors() bool { + if m != nil { + return m.FailOnOptimizerErrors + } + return false +} + +func (m *RewriterConfig) GetScopedAllocatorOpts() *ScopedAllocatorOptions { + if m != nil { + return m.ScopedAllocatorOpts + } + return nil +} + +func (m *RewriterConfig) GetOptimizers() []string { + if m != nil { + return m.Optimizers + } + return nil +} + +func (m *RewriterConfig) GetCustomOptimizers() []*RewriterConfig_CustomGraphOptimizer { + if m != nil { + return m.CustomOptimizers + } + return nil +} + +func (m *RewriterConfig) GetInterOptimizerVerifierConfig() *VerifierConfig { + if m != nil { + return m.InterOptimizerVerifierConfig + } + return nil +} + +func (m *RewriterConfig) GetPostOptimizationVerifierConfig() *VerifierConfig { + if m != nil { + return m.PostOptimizationVerifierConfig + } + return nil +} + +// Message to describe custom graph optimizer and its parameters +type RewriterConfig_CustomGraphOptimizer struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ParameterMap map[string]*framework.AttrValue `protobuf:"bytes,2,rep,name=parameter_map,json=parameterMap,proto3" json:"parameter_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RewriterConfig_CustomGraphOptimizer) 
Reset() { *m = RewriterConfig_CustomGraphOptimizer{} } +func (m *RewriterConfig_CustomGraphOptimizer) String() string { return proto.CompactTextString(m) } +func (*RewriterConfig_CustomGraphOptimizer) ProtoMessage() {} +func (*RewriterConfig_CustomGraphOptimizer) Descriptor() ([]byte, []int) { + return fileDescriptor_1dd7de60bf190bbb, []int{2, 0} +} + +func (m *RewriterConfig_CustomGraphOptimizer) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RewriterConfig_CustomGraphOptimizer.Unmarshal(m, b) +} +func (m *RewriterConfig_CustomGraphOptimizer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RewriterConfig_CustomGraphOptimizer.Marshal(b, m, deterministic) +} +func (m *RewriterConfig_CustomGraphOptimizer) XXX_Merge(src proto.Message) { + xxx_messageInfo_RewriterConfig_CustomGraphOptimizer.Merge(m, src) +} +func (m *RewriterConfig_CustomGraphOptimizer) XXX_Size() int { + return xxx_messageInfo_RewriterConfig_CustomGraphOptimizer.Size(m) +} +func (m *RewriterConfig_CustomGraphOptimizer) XXX_DiscardUnknown() { + xxx_messageInfo_RewriterConfig_CustomGraphOptimizer.DiscardUnknown(m) +} + +var xxx_messageInfo_RewriterConfig_CustomGraphOptimizer proto.InternalMessageInfo + +func (m *RewriterConfig_CustomGraphOptimizer) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RewriterConfig_CustomGraphOptimizer) GetParameterMap() map[string]*framework.AttrValue { + if m != nil { + return m.ParameterMap + } + return nil +} + +func init() { + proto.RegisterEnum("tensorflow.RewriterConfig_Toggle", RewriterConfig_Toggle_name, RewriterConfig_Toggle_value) + proto.RegisterEnum("tensorflow.RewriterConfig_NumIterationsType", RewriterConfig_NumIterationsType_name, RewriterConfig_NumIterationsType_value) + proto.RegisterEnum("tensorflow.RewriterConfig_MemOptType", RewriterConfig_MemOptType_name, RewriterConfig_MemOptType_value) + proto.RegisterType((*AutoParallelOptions)(nil), "tensorflow.AutoParallelOptions") + 
proto.RegisterType((*ScopedAllocatorOptions)(nil), "tensorflow.ScopedAllocatorOptions") + proto.RegisterType((*RewriterConfig)(nil), "tensorflow.RewriterConfig") + proto.RegisterType((*RewriterConfig_CustomGraphOptimizer)(nil), "tensorflow.RewriterConfig.CustomGraphOptimizer") + proto.RegisterMapType((map[string]*framework.AttrValue)(nil), "tensorflow.RewriterConfig.CustomGraphOptimizer.ParameterMapEntry") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/rewriter_config.proto", fileDescriptor_1dd7de60bf190bbb) +} + +var fileDescriptor_1dd7de60bf190bbb = []byte{ + // 1206 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xeb, 0x6e, 0xdb, 0x36, + 0x14, 0xc7, 0x2b, 0xbb, 0x71, 0x9b, 0x93, 0x9b, 0x4c, 0xdb, 0x89, 0x9a, 0x76, 0x6d, 0x6a, 0x60, + 0x5b, 0xb0, 0x0d, 0x0e, 0x90, 0xdd, 0x87, 0x02, 0x83, 0x9b, 0x3a, 0x89, 0x81, 0xd8, 0x32, 0x64, + 0x27, 0x05, 0x0a, 0x0c, 0x04, 0x23, 0xd3, 0x8e, 0x50, 0x51, 0x24, 0x28, 0xaa, 0x5d, 0xf6, 0x32, + 0x7b, 0x81, 0xed, 0x3d, 0xf6, 0x28, 0xc3, 0x9e, 0x60, 0x1f, 0x07, 0x52, 0xbe, 0x48, 0x76, 0xd6, + 0x79, 0xdf, 0x24, 0x1e, 0xfe, 0x7f, 0xfc, 0xf3, 0x76, 0x78, 0xa0, 0xa1, 0x68, 0x14, 0x73, 0x39, + 0x0a, 0xf9, 0xfb, 0x23, 0x9f, 0x4b, 0x7a, 0x24, 0x24, 0x57, 0xfc, 0x3a, 0x19, 0x1d, 0x49, 0xfa, + 0x5e, 0x06, 0x8a, 0x4a, 0xec, 0xf3, 0x68, 0x14, 0x8c, 0x1b, 0x26, 0x80, 0x60, 0xde, 0x7f, 0xff, + 0xb3, 0x45, 0xed, 0x48, 0x12, 0x46, 0xdf, 0x73, 0xf9, 0xf6, 0x88, 0x28, 0x25, 0xf1, 0x3b, 0x12, + 0x26, 0x34, 0xd5, 0xed, 0xff, 0xfb, 0x38, 0xef, 0xa8, 0x0c, 0x46, 0xc1, 0xc2, 0x38, 0xf5, 0x1e, + 0x54, 0x9a, 0x89, 0xe2, 0x3d, 0x22, 0x49, 0x18, 0xd2, 0xd0, 0x15, 0x2a, 0xe0, 0x51, 0x8c, 0x76, + 0xa1, 0x44, 0x23, 0x72, 0x1d, 0x52, 0xc7, 0x3a, 0xb0, 0x0e, 0x1f, 0x7a, 0x93, 0x3f, 0xf4, 0x1c, + 0x36, 0xa3, 0x84, 0x61, 0x49, 0x45, 0x18, 0xf8, 0x24, 0x76, 0x0a, 0x07, 0xd6, 0xe1, 0x9a, 0xb7, + 0x11, 0x25, 0xcc, 0x9b, 0x34, 0xd5, 0xbf, 0x86, 0xdd, 0xbe, 0xcf, 0x05, 0x1d, 0x36, 
0xc3, 0x90, + 0xfb, 0x44, 0x71, 0x39, 0x85, 0x3e, 0x86, 0xf5, 0x14, 0x83, 0xb9, 0x70, 0xac, 0x83, 0xe2, 0xe1, + 0xba, 0xf7, 0x30, 0x6d, 0x70, 0x45, 0xfd, 0xcf, 0x2a, 0x6c, 0x7b, 0x93, 0xa5, 0x38, 0x31, 0x0e, + 0xd1, 0x05, 0xd8, 0x21, 0xb9, 0xe5, 0x89, 0xc2, 0x5c, 0xa8, 0x80, 0x05, 0xbf, 0x50, 0x69, 0xec, + 0x6c, 0x1f, 0x3f, 0xcf, 0x4c, 0xb3, 0x91, 0x57, 0x35, 0x06, 0x7c, 0x3c, 0x0e, 0xa9, 0xb7, 0x93, + 0x4a, 0xdd, 0xa9, 0x52, 0xd3, 0x7c, 0x1e, 0xc5, 0x8a, 0x44, 0x0a, 0x8f, 0x78, 0x38, 0x0c, 0xa2, + 0xb1, 0x53, 0x5c, 0x99, 0x36, 0x95, 0x9e, 0xa6, 0x4a, 0xd4, 0x03, 0x14, 0xdf, 0x10, 0x41, 0xa7, + 0xd6, 0x88, 0x9e, 0xa2, 0xb3, 0xb5, 0x2a, 0xaf, 0x6c, 0xc4, 0x6e, 0x46, 0x8b, 0x7e, 0x84, 0x75, + 0x49, 0x19, 0x11, 0x42, 0x1b, 0xdb, 0x5e, 0x15, 0x34, 0xd7, 0xa0, 0x37, 0xb0, 0x47, 0x64, 0xa0, + 0x6e, 0x18, 0x55, 0x81, 0x9f, 0xf7, 0xf5, 0x60, 0x55, 0xdc, 0xee, 0x9c, 0x90, 0x33, 0xf7, 0x06, + 0xf6, 0x86, 0x54, 0xd0, 0x68, 0x48, 0x23, 0xff, 0x36, 0xcf, 0x7e, 0xb8, 0x32, 0x7b, 0x4e, 0xc8, + 0xb1, 0xbb, 0x50, 0x0e, 0x39, 0x17, 0x79, 0xea, 0xfa, 0xaa, 0x54, 0x5b, 0x6b, 0x73, 0xbc, 0x2b, + 0xa8, 0x8d, 0x92, 0xc8, 0xd7, 0xdf, 0x79, 0x26, 0xac, 0xca, 0xac, 0x4e, 0xf5, 0x39, 0xee, 0x39, + 0x6c, 0x0f, 0xe9, 0x75, 0x32, 0xc6, 0xb1, 0x92, 0x81, 0x10, 0x54, 0x3a, 0x1b, 0xab, 0x02, 0xb7, + 0x8c, 0xb0, 0x3f, 0xd1, 0xa1, 0x63, 0xa8, 0x0d, 0x83, 0xd8, 0xdc, 0x04, 0xc6, 0x87, 0x34, 0xc4, + 0x42, 0x26, 0x91, 0xde, 0xf6, 0x82, 0xb9, 0x6c, 0x95, 0x49, 0xb0, 0xa3, 0x63, 0xbd, 0x34, 0x84, + 0x28, 0x7c, 0x14, 0x9b, 0x6b, 0x85, 0xc9, 0xf4, 0x5e, 0xe5, 0x67, 0xb7, 0xb3, 0xaa, 0x99, 0xc7, + 0xf1, 0xf2, 0xf5, 0xcc, 0x6c, 0xb4, 0x23, 0x82, 0x08, 0x2b, 0x8e, 0x6f, 0x78, 0xac, 0xf2, 0x23, + 0xa0, 0x55, 0x47, 0xa8, 0x89, 0x20, 0x1a, 0xf0, 0x73, 0x1e, 0xab, 0xc5, 0x43, 0x14, 0x30, 0x11, + 0x52, 0x46, 0x23, 0x65, 0x5a, 0x70, 0x4c, 0x43, 0xea, 0x2b, 0x2e, 0x9d, 0xdd, 0x95, 0x0f, 0x51, + 0x9e, 0xd0, 0x9f, 0x00, 0x50, 0x1f, 0xaa, 0x24, 0x51, 0x1c, 0xb3, 0xe0, 0x67, 0x3a, 0xc4, 0x42, + 0x52, 0x3f, 0x88, 0xb5, 
0xe7, 0xbd, 0x55, 0xc1, 0x48, 0xcb, 0x3b, 0x5a, 0xdd, 0x9b, 0x8a, 0xd1, + 0x57, 0xb0, 0x3b, 0xdb, 0x27, 0xaa, 0x48, 0x26, 0x0d, 0x55, 0xcc, 0x46, 0x55, 0xa7, 0x1b, 0x45, + 0x15, 0x99, 0x27, 0x9a, 0x1b, 0x78, 0x94, 0xef, 0x8d, 0xf5, 0x68, 0xc6, 0x6e, 0xec, 0x6c, 0x1a, + 0x3f, 0x5f, 0x7c, 0xc0, 0x4f, 0x37, 0x61, 0xed, 0x59, 0xff, 0xc1, 0xad, 0xa0, 0xde, 0x1e, 0xcb, + 0xf2, 0xe7, 0x41, 0xf4, 0x09, 0xec, 0xb0, 0x20, 0xc2, 0x63, 0x49, 0xc4, 0x0d, 0x8e, 0xf8, 0x90, + 0xc6, 0x4e, 0xd9, 0x24, 0xe4, 0x2d, 0x16, 0x44, 0x67, 0xba, 0xb5, 0xab, 0x1b, 0xd1, 0x15, 0x54, + 0x18, 0x65, 0x5c, 0x2e, 0xdc, 0xdc, 0xfb, 0xc6, 0xcb, 0xc7, 0x1f, 0xf0, 0xd2, 0xa1, 0xcc, 0x15, + 0xca, 0x98, 0x40, 0x29, 0x21, 0xb7, 0xa1, 0x7d, 0xf8, 0x34, 0xcf, 0xa5, 0x12, 0x2b, 0x22, 0xc7, + 0x54, 0x19, 0x37, 0x38, 0x22, 0x8c, 0x62, 0x73, 0xda, 0x9c, 0xd2, 0x81, 0x75, 0xb8, 0xee, 0xd5, + 0x73, 0x10, 0x2a, 0x07, 0xa6, 0xb3, 0x36, 0xd9, 0x25, 0x8c, 0x9a, 0x67, 0x03, 0x7d, 0xbf, 0xb4, + 0x7c, 0x2a, 0x60, 0x54, 0xbf, 0x02, 0x2c, 0x76, 0xaa, 0x07, 0xd6, 0x61, 0xd1, 0xdb, 0xcd, 0x2d, + 0xc8, 0x20, 0x0d, 0x77, 0x62, 0xf4, 0x0a, 0xb6, 0xcc, 0x21, 0x10, 0x93, 0xd7, 0xcc, 0x59, 0x3b, + 0xb0, 0x0e, 0x37, 0x8e, 0x9f, 0x65, 0x67, 0x78, 0xc7, 0x6b, 0xe7, 0x6d, 0x92, 0x4c, 0x23, 0xfa, + 0x16, 0x9c, 0x11, 0x09, 0x42, 0x3c, 0x4f, 0x1f, 0x54, 0x62, 0x2a, 0x25, 0x97, 0xb1, 0x53, 0x33, + 0xfb, 0x5e, 0xd3, 0x71, 0x37, 0x9a, 0x39, 0x68, 0x99, 0xa0, 0x4e, 0x3c, 0x77, 0x5d, 0xd1, 0xd8, + 0xb1, 0x8d, 0x8d, 0x7a, 0xd6, 0xc6, 0xdd, 0x4f, 0xa4, 0x57, 0x59, 0xbe, 0x9b, 0x31, 0x7a, 0x0a, + 0x30, 0x33, 0x12, 0x3b, 0x43, 0xf3, 0x70, 0x66, 0x5a, 0xd0, 0x4f, 0x50, 0xf6, 0x93, 0x58, 0x71, + 0x86, 0x33, 0xdd, 0xfe, 0xd0, 0x0f, 0xec, 0xc6, 0xf1, 0xd1, 0x07, 0x76, 0xf7, 0xc4, 0x88, 0xcc, + 0x39, 0x99, 0x4d, 0xc5, 0xb3, 0x53, 0x94, 0x3b, 0xc7, 0x5f, 0xc3, 0xb3, 0x20, 0xd2, 0x05, 0xca, + 0x7c, 0x35, 0x16, 0x6a, 0x09, 0xe7, 0xb7, 0x82, 0x99, 0x61, 0xb6, 0xfa, 0x68, 0x5c, 0x4d, 0xfa, + 0xa4, 0x83, 0x79, 0x4f, 0x0c, 0x63, 0x86, 0xcd, 0x47, 0xd1, 
0x08, 0x9e, 0x8b, 0xc5, 0x7c, 0xb3, + 0x34, 0xca, 0xef, 0xff, 0x3d, 0xca, 0x53, 0xb1, 0x90, 0x74, 0xf2, 0xf1, 0xfd, 0xbf, 0x2c, 0xa8, + 0xde, 0x35, 0x6d, 0x84, 0xe0, 0xbe, 0x3e, 0xad, 0xa6, 0xbe, 0x58, 0xf7, 0xcc, 0x37, 0x1a, 0xc1, + 0x96, 0x3e, 0x49, 0x8c, 0xea, 0xc9, 0x33, 0x22, 0x9c, 0x82, 0x59, 0xd2, 0xe6, 0xff, 0x5c, 0xd2, + 0x46, 0x6f, 0x0a, 0xe9, 0x10, 0xd1, 0x8a, 0x94, 0xbc, 0xf5, 0x36, 0x45, 0xa6, 0x69, 0xff, 0x0a, + 0xca, 0x4b, 0x5d, 0x90, 0x0d, 0xc5, 0xb7, 0xf4, 0x76, 0xe2, 0x47, 0x7f, 0xa2, 0xcf, 0x61, 0xcd, + 0x54, 0x7a, 0x4e, 0xba, 0x0c, 0xb5, 0xdc, 0xa9, 0x56, 0x4a, 0x5e, 0xe9, 0xa0, 0x97, 0xf6, 0xf9, + 0xa1, 0xf0, 0x9d, 0x55, 0xff, 0x06, 0x4a, 0x69, 0x72, 0x43, 0x1b, 0xf0, 0xe0, 0x55, 0xeb, 0xb4, + 0x79, 0x79, 0x31, 0xb0, 0xef, 0xa1, 0x12, 0x14, 0xdc, 0xae, 0x6d, 0xa1, 0x07, 0x50, 0x74, 0x4f, + 0x4f, 0xed, 0x02, 0xda, 0x06, 0x68, 0x9e, 0x9d, 0x79, 0xad, 0x7e, 0xbf, 0x7d, 0xd5, 0xb2, 0x8b, + 0xf5, 0x17, 0x50, 0x5e, 0x4a, 0x42, 0xa8, 0x06, 0xe5, 0x09, 0x02, 0x77, 0x2f, 0x3b, 0xb8, 0x3d, + 0x68, 0x79, 0x7d, 0xfb, 0x9e, 0x81, 0x74, 0x5b, 0x29, 0x6d, 0xf0, 0xda, 0xb5, 0x0b, 0xf5, 0x5f, + 0x2d, 0x80, 0x79, 0xde, 0x40, 0x15, 0xd8, 0x99, 0xea, 0x3a, 0xad, 0x0e, 0x76, 0x7b, 0xda, 0xc2, + 0x36, 0x40, 0xd7, 0x9d, 0xfd, 0x5b, 0x08, 0xa0, 0xd4, 0x69, 0x76, 0x2f, 0x9b, 0x17, 0x76, 0x01, + 0xed, 0x41, 0xa5, 0xff, 0xba, 0xd9, 0xeb, 0xb5, 0xbb, 0x67, 0xf8, 0xbc, 0x75, 0xe9, 0xb5, 0xfb, + 0x83, 0xf6, 0x49, 0xdf, 0xbe, 0x8f, 0x9e, 0x80, 0xe3, 0xb5, 0x4e, 0xdc, 0x4e, 0xef, 0x72, 0xd0, + 0x1c, 0xb4, 0xdd, 0x6e, 0x36, 0xba, 0x86, 0x1e, 0x41, 0xad, 0x7f, 0x72, 0xde, 0x7a, 0x75, 0x79, + 0xb1, 0x20, 0x2c, 0xe9, 0xd1, 0x32, 0xff, 0xc5, 0x97, 0x31, 0x38, 0x5c, 0x8e, 0xb3, 0xcb, 0x37, + 0x2b, 0xa8, 0x5f, 0x56, 0xf3, 0x1b, 0xda, 0xd3, 0x45, 0x72, 0xdc, 0xb3, 0xde, 0xbc, 0x18, 0x07, + 0xea, 0x26, 0xb9, 0x6e, 0xf8, 0x9c, 0x1d, 0x65, 0x4a, 0xec, 0xbb, 0x3f, 0xc7, 0x3c, 0x5f, 0x7b, + 0xff, 0x6d, 0x59, 0xd7, 0x25, 0xf3, 0xf3, 0xe5, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x39, 0x03, + 
0x23, 0x91, 0x09, 0x0c, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/rewriter_config.proto b/executor/proto/tensorflow/core/protobuf/rewriter_config.proto new file mode 100644 index 0000000000..54943eec07 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/rewriter_config.proto @@ -0,0 +1,187 @@ +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; +option java_outer_classname = "RewriterConfigProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; + +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; + +import "tensorflow/core/framework/attr_value.proto"; +import "tensorflow/core/protobuf/verifier_config.proto"; + +message AutoParallelOptions { + bool enable = 1; + int32 num_replicas = 2; +} + +message ScopedAllocatorOptions { + // If present, only perform optimization for these ops. + repeated string enable_op = 1; +} + +message RewriterConfig { + // Graph rewriting is experimental and subject to change, not covered by any + // API stability guarantees. + + // Configuration options for the meta-optimizer. Unless otherwise noted, these + // configuration options do not apply to explicitly triggered optimization + // passes in the optimizers field. + + enum Toggle { + DEFAULT = 0; + ON = 1; + OFF = 2; + // Enable some aggressive optimizations that use assumptions that TF graphs + // may break. For example, assume the shape of a placeholder matches its + // actual feed. + AGGRESSIVE = 3; + } + + // Enum controlling the number of times to run optimizers. The default is to + // run them twice. + enum NumIterationsType { + DEFAULT_NUM_ITERS = 0; + ONE = 1; + TWO = 2; + } + + // Optimize tensor layouts (default is ON) + // e.g. This will try to use NCHW layout on GPU which is faster. 
+ Toggle layout_optimizer = 1; + // Fold constants (default is ON) + // Statically infer the value of tensors when possible, and materialize the + // result using constants. + Toggle constant_folding = 3; + // Shape optimizations (default is ON) + // Simplify computations made on shapes. + Toggle shape_optimization = 13; + // Remapping (default is ON) + // Remap subgraphs onto more efficient implementations. + Toggle remapping = 14; + // Arithmetic optimizations (default is ON) + // e.g. Simplify arithmetic ops; merge ops with same value (like constants). + Toggle arithmetic_optimization = 7; + // Control dependency optimizations (default is ON). + // Remove redundant control dependencies, which may enable other optimization. + Toggle dependency_optimization = 8; + // Loop optimizations (default is ON). + Toggle loop_optimization = 9; + // Function optimizations (default is ON). + Toggle function_optimization = 10; + // Strips debug-related nodes from the graph (off by default). + Toggle debug_stripper = 11; + // If true, don't remove unnecessary ops from the graph + bool disable_model_pruning = 2; + // Try to allocate some independent Op outputs contiguously in order to + // merge or eliminate downstream Ops (off by default). + Toggle scoped_allocator_optimization = 15; + // Force small ops onto the CPU (default is OFF). + Toggle pin_to_host_optimization = 18; + // Enable the swap of kernel implementations based on the device placement + // (default is ON). + Toggle implementation_selector = 22; + // Optimize data types (default is OFF). + // e.g., This will try to use float16 on GPU which is faster. + // Note that this can change the numerical stability of the graph and may + // require the use of loss scaling to maintain model convergence. + Toggle auto_mixed_precision = 23; + // Disable the entire meta optimizer (off by default). + bool disable_meta_optimizer = 19; + + // Controls how many times we run the optimizers in meta optimizer (default + // is once). 
+ NumIterationsType meta_optimizer_iterations = 12; + + // The minimum number of nodes in a graph to optimizer. For smaller graphs, + // optimization is skipped. + // 0 means the system picks an appropriate number. + // < 0 means do not skip optimization. + int32 min_graph_nodes = 17; + + enum MemOptType { + // The default setting (SCHEDULING and SWAPPING HEURISTICS only) + DEFAULT_MEM_OPT = 0; + // Disabled in the meta-optimizer. + NO_MEM_OPT = 1; + // Driven by manual op-level annotations. + MANUAL = 2; + + // Driven by heuristics. The behavior of these heuristics is subject to + // change. Currently includes an experimental recomputation and swapping + // heuristics. Manual annotations are respected, but additional nodes are + // selected automatically. + + // Swapping heuristic will move a tensor from the GPU to the CPU and move + // it back when needed to reduce peak memory usage. + SWAPPING_HEURISTICS = 4; + // Recomputation heuristics will recompute ops (such as Relu activation) + // during backprop instead of storing them, reducing peak memory usage. + RECOMPUTATION_HEURISTICS = 5; + // Scheduling will split big ops such as AddN and try to enforce a schedule + // of the new computations that decreases peak memory usage. + SCHEDULING_HEURISTICS = 6; + // Use any combination of swapping and recomputation heuristics. + HEURISTICS = 3; + } + // Configures memory optimization passes through the meta-optimizer. Has no + // effect on manually requested memory optimization passes in the optimizers + // field. + MemOptType memory_optimization = 4; + // A node name scope for node names which are valid outputs of recompuations. + // Inputs to nodes that match this scope may be recomputed (subject either to + // manual annotation of those input nodes or to manual annotation and + // heuristics depending on memory_optimization), but the nodes themselves will + // not be recomputed. 
This matches any sub-scopes as well, meaning the scope + // can appear not just as a top-level scope. For example, if the value is + // "gradients/", the default, it will match node name "gradients/foo", + // "foo/gradients/bar", but not "foo_gradients/" + string memory_optimizer_target_node_name_scope = 6; + // Maximum number of milliseconds to spend optimizing a single graph before + // timing out. If equal to 0 the system picks a default (currently 5 minutes). + // If less than 0 the optimizer will never time out. + int64 meta_optimizer_timeout_ms = 20; + + // Configures AutoParallel optimization passes either through the + // meta-optimizer or when manually specified through the optimizers field. + AutoParallelOptions auto_parallel = 5; + + // If true, any optimization pass failing will cause the MetaOptimizer to + // stop with an error. By default - or when set to false, failing passes are + // skipped silently. + bool fail_on_optimizer_errors = 21; + + ScopedAllocatorOptions scoped_allocator_opts = 16; + + // If non-empty, will use this as an alternative way to specify a list of + // optimizations to turn on and the order of the optimizations (replacing the + // meta-optimizer). + // + // Of the RewriterConfig options, only the AutoParallel configuration options + // (the auto_parallel field) apply to manually requested optimization passes + // ("autoparallel"). Memory optimization passes ("memory") invoked here are + // not configurable (in contrast to memory optimization passes through the + // meta-optimizer) and act only on manual op annotations. + // + // Custom optimizers (see custom_optimizers) that are not part of this + // schedule will be run after - in the order that they were specified. + repeated string optimizers = 100; + + // Message to describe custom graph optimizer and its parameters + message CustomGraphOptimizer { + string name = 1; + map parameter_map = 2; + } + + // list of CustomGraphOptimizers to apply. 
+ repeated CustomGraphOptimizer custom_optimizers = 200; + + // VerifierConfig specifying the verifiers to be run after every optimizer. + VerifierConfig inter_optimizer_verifier_config = 300; + + // VerifierConfig specifying the verifiers to be run at the end, after all + // optimizers have run. + VerifierConfig post_optimization_verifier_config = 301; +} diff --git a/executor/proto/tensorflow/core/protobuf/saved_model.pb.go b/executor/proto/tensorflow/core/protobuf/saved_model.pb.go new file mode 100644 index 0000000000..8b014a2472 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/saved_model.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/saved_model.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// SavedModel is the high level serialization format for TensorFlow Models. +// See [todo: doc links, similar to session_bundle] for more information. +type SavedModel struct { + // The schema version of the SavedModel instance. Used for versioning when + // making future changes to the specification/implementation. Initial value + // at release will be 1. + SavedModelSchemaVersion int64 `protobuf:"varint,1,opt,name=saved_model_schema_version,json=savedModelSchemaVersion,proto3" json:"saved_model_schema_version,omitempty"` + // One or more MetaGraphs. 
+ MetaGraphs []*MetaGraphDef `protobuf:"bytes,2,rep,name=meta_graphs,json=metaGraphs,proto3" json:"meta_graphs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedModel) Reset() { *m = SavedModel{} } +func (m *SavedModel) String() string { return proto.CompactTextString(m) } +func (*SavedModel) ProtoMessage() {} +func (*SavedModel) Descriptor() ([]byte, []int) { + return fileDescriptor_537826d0bcc2f334, []int{0} +} + +func (m *SavedModel) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedModel.Unmarshal(m, b) +} +func (m *SavedModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedModel.Marshal(b, m, deterministic) +} +func (m *SavedModel) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedModel.Merge(m, src) +} +func (m *SavedModel) XXX_Size() int { + return xxx_messageInfo_SavedModel.Size(m) +} +func (m *SavedModel) XXX_DiscardUnknown() { + xxx_messageInfo_SavedModel.DiscardUnknown(m) +} + +var xxx_messageInfo_SavedModel proto.InternalMessageInfo + +func (m *SavedModel) GetSavedModelSchemaVersion() int64 { + if m != nil { + return m.SavedModelSchemaVersion + } + return 0 +} + +func (m *SavedModel) GetMetaGraphs() []*MetaGraphDef { + if m != nil { + return m.MetaGraphs + } + return nil +} + +func init() { + proto.RegisterType((*SavedModel)(nil), "tensorflow.SavedModel") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/saved_model.proto", fileDescriptor_537826d0bcc2f334) +} + +var fileDescriptor_537826d0bcc2f334 = []byte{ + // 228 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x2a, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0xd7, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x2f, 0x28, 0xca, 0x2f, + 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x4e, 0x2c, 0x4b, 0x4d, 0x89, 0xcf, 0xcd, 0x4f, 0x49, 0xcd, + 0xd1, 0x03, 0x0b, 0x0a, 0x71, 0x21, 0xd4, 
0x4a, 0x69, 0xe2, 0xd4, 0x97, 0x9b, 0x5a, 0x92, 0x18, + 0x9f, 0x5e, 0x94, 0x58, 0x90, 0x01, 0xd1, 0xa6, 0xd4, 0xc2, 0xc8, 0xc5, 0x15, 0x0c, 0x32, 0xcc, + 0x17, 0x64, 0x96, 0x90, 0x35, 0x97, 0x14, 0x92, 0xd1, 0xf1, 0xc5, 0xc9, 0x19, 0xa9, 0xb9, 0x89, + 0xf1, 0x65, 0xa9, 0x45, 0xc5, 0x99, 0xf9, 0x79, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0xe2, + 0xc5, 0x70, 0xf5, 0xc1, 0x60, 0xf9, 0x30, 0x88, 0xb4, 0x90, 0x25, 0x17, 0x37, 0xc2, 0xfc, 0x62, + 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x09, 0x3d, 0x84, 0x63, 0xf4, 0x7c, 0x53, 0x4b, 0x12, + 0xdd, 0x41, 0xb2, 0x2e, 0xa9, 0x69, 0x41, 0x5c, 0xb9, 0x30, 0x5e, 0xb1, 0x53, 0x3e, 0x97, 0x44, + 0x7e, 0x51, 0x3a, 0xb2, 0xd2, 0xb4, 0xa2, 0xc4, 0xdc, 0xd4, 0xf2, 0xfc, 0xa2, 0x6c, 0x27, 0x01, + 0x84, 0xfb, 0x02, 0x40, 0x6e, 0x2e, 0x0e, 0x60, 0x8c, 0xb2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, + 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x47, 0xf2, 0x2c, 0x76, 0x66, 0x7a, 0x3e, 0x6a, 0x28, 0xfc, 0x60, + 0x64, 0x4c, 0x62, 0x03, 0x73, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x82, 0x52, 0xcf, 0x18, + 0x63, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/saved_model.proto b/executor/proto/tensorflow/core/protobuf/saved_model.proto new file mode 100644 index 0000000000..03789d3df7 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/saved_model.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "SavedModelProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +import "tensorflow/core/protobuf/meta_graph.proto"; + +// SavedModel is the high level serialization format for TensorFlow Models. +// See [todo: doc links, similar to session_bundle] for more information. +message SavedModel { + // The schema version of the SavedModel instance. 
Used for versioning when + // making future changes to the specification/implementation. Initial value + // at release will be 1. + int64 saved_model_schema_version = 1; + + // One or more MetaGraphs. + repeated MetaGraphDef meta_graphs = 2; +} diff --git a/executor/proto/tensorflow/core/protobuf/saved_object_graph.pb.go b/executor/proto/tensorflow/core/protobuf/saved_object_graph.pb.go new file mode 100644 index 0000000000..5f0208b8da --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/saved_object_graph.pb.go @@ -0,0 +1,854 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/saved_object_graph.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type SavedObjectGraph struct { + // Flattened list of objects in the object graph. + // + // The position of the object in this list indicates its id. + // Nodes[0] is considered the root node. + Nodes []*SavedObject `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` + // Information about captures and output structures in concrete functions. + // Referenced from SavedBareConcreteFunction and SavedFunction. 
+ ConcreteFunctions map[string]*SavedConcreteFunction `protobuf:"bytes,2,rep,name=concrete_functions,json=concreteFunctions,proto3" json:"concrete_functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedObjectGraph) Reset() { *m = SavedObjectGraph{} } +func (m *SavedObjectGraph) String() string { return proto.CompactTextString(m) } +func (*SavedObjectGraph) ProtoMessage() {} +func (*SavedObjectGraph) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{0} +} + +func (m *SavedObjectGraph) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedObjectGraph.Unmarshal(m, b) +} +func (m *SavedObjectGraph) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedObjectGraph.Marshal(b, m, deterministic) +} +func (m *SavedObjectGraph) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedObjectGraph.Merge(m, src) +} +func (m *SavedObjectGraph) XXX_Size() int { + return xxx_messageInfo_SavedObjectGraph.Size(m) +} +func (m *SavedObjectGraph) XXX_DiscardUnknown() { + xxx_messageInfo_SavedObjectGraph.DiscardUnknown(m) +} + +var xxx_messageInfo_SavedObjectGraph proto.InternalMessageInfo + +func (m *SavedObjectGraph) GetNodes() []*SavedObject { + if m != nil { + return m.Nodes + } + return nil +} + +func (m *SavedObjectGraph) GetConcreteFunctions() map[string]*SavedConcreteFunction { + if m != nil { + return m.ConcreteFunctions + } + return nil +} + +type SavedObject struct { + // Objects which this object depends on: named edges in the dependency + // graph. + // + // Note: currently only valid if kind == "user_object". + Children []*TrackableObjectGraph_TrackableObject_ObjectReference `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"` + // Slot variables owned by this object. 
This describes the three-way + // (optimizer, variable, slot variable) relationship; none of the three + // depend on the others directly. + // + // Note: currently only valid if kind == "user_object". + SlotVariables []*TrackableObjectGraph_TrackableObject_SlotVariableReference `protobuf:"bytes,3,rep,name=slot_variables,json=slotVariables,proto3" json:"slot_variables,omitempty"` + // Types that are valid to be assigned to Kind: + // *SavedObject_UserObject + // *SavedObject_Asset + // *SavedObject_Function + // *SavedObject_Variable + // *SavedObject_BareConcreteFunction + // *SavedObject_Constant + // *SavedObject_Resource + Kind isSavedObject_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedObject) Reset() { *m = SavedObject{} } +func (m *SavedObject) String() string { return proto.CompactTextString(m) } +func (*SavedObject) ProtoMessage() {} +func (*SavedObject) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{1} +} + +func (m *SavedObject) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedObject.Unmarshal(m, b) +} +func (m *SavedObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedObject.Marshal(b, m, deterministic) +} +func (m *SavedObject) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedObject.Merge(m, src) +} +func (m *SavedObject) XXX_Size() int { + return xxx_messageInfo_SavedObject.Size(m) +} +func (m *SavedObject) XXX_DiscardUnknown() { + xxx_messageInfo_SavedObject.DiscardUnknown(m) +} + +var xxx_messageInfo_SavedObject proto.InternalMessageInfo + +func (m *SavedObject) GetChildren() []*TrackableObjectGraph_TrackableObject_ObjectReference { + if m != nil { + return m.Children + } + return nil +} + +func (m *SavedObject) GetSlotVariables() []*TrackableObjectGraph_TrackableObject_SlotVariableReference { + if m != nil { + return m.SlotVariables + } + 
return nil +} + +type isSavedObject_Kind interface { + isSavedObject_Kind() +} + +type SavedObject_UserObject struct { + UserObject *SavedUserObject `protobuf:"bytes,4,opt,name=user_object,json=userObject,proto3,oneof"` +} + +type SavedObject_Asset struct { + Asset *SavedAsset `protobuf:"bytes,5,opt,name=asset,proto3,oneof"` +} + +type SavedObject_Function struct { + Function *SavedFunction `protobuf:"bytes,6,opt,name=function,proto3,oneof"` +} + +type SavedObject_Variable struct { + Variable *SavedVariable `protobuf:"bytes,7,opt,name=variable,proto3,oneof"` +} + +type SavedObject_BareConcreteFunction struct { + BareConcreteFunction *SavedBareConcreteFunction `protobuf:"bytes,8,opt,name=bare_concrete_function,json=bareConcreteFunction,proto3,oneof"` +} + +type SavedObject_Constant struct { + Constant *SavedConstant `protobuf:"bytes,9,opt,name=constant,proto3,oneof"` +} + +type SavedObject_Resource struct { + Resource *SavedResource `protobuf:"bytes,10,opt,name=resource,proto3,oneof"` +} + +func (*SavedObject_UserObject) isSavedObject_Kind() {} + +func (*SavedObject_Asset) isSavedObject_Kind() {} + +func (*SavedObject_Function) isSavedObject_Kind() {} + +func (*SavedObject_Variable) isSavedObject_Kind() {} + +func (*SavedObject_BareConcreteFunction) isSavedObject_Kind() {} + +func (*SavedObject_Constant) isSavedObject_Kind() {} + +func (*SavedObject_Resource) isSavedObject_Kind() {} + +func (m *SavedObject) GetKind() isSavedObject_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *SavedObject) GetUserObject() *SavedUserObject { + if x, ok := m.GetKind().(*SavedObject_UserObject); ok { + return x.UserObject + } + return nil +} + +func (m *SavedObject) GetAsset() *SavedAsset { + if x, ok := m.GetKind().(*SavedObject_Asset); ok { + return x.Asset + } + return nil +} + +func (m *SavedObject) GetFunction() *SavedFunction { + if x, ok := m.GetKind().(*SavedObject_Function); ok { + return x.Function + } + return nil +} + +func (m *SavedObject) 
GetVariable() *SavedVariable { + if x, ok := m.GetKind().(*SavedObject_Variable); ok { + return x.Variable + } + return nil +} + +func (m *SavedObject) GetBareConcreteFunction() *SavedBareConcreteFunction { + if x, ok := m.GetKind().(*SavedObject_BareConcreteFunction); ok { + return x.BareConcreteFunction + } + return nil +} + +func (m *SavedObject) GetConstant() *SavedConstant { + if x, ok := m.GetKind().(*SavedObject_Constant); ok { + return x.Constant + } + return nil +} + +func (m *SavedObject) GetResource() *SavedResource { + if x, ok := m.GetKind().(*SavedObject_Resource); ok { + return x.Resource + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*SavedObject) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*SavedObject_UserObject)(nil), + (*SavedObject_Asset)(nil), + (*SavedObject_Function)(nil), + (*SavedObject_Variable)(nil), + (*SavedObject_BareConcreteFunction)(nil), + (*SavedObject_Constant)(nil), + (*SavedObject_Resource)(nil), + } +} + +// A SavedUserObject is an object (in the object-oriented language of the +// TensorFlow program) of some user- or framework-defined class other than +// those handled specifically by the other kinds of SavedObjects. +// +// This object cannot be evaluated as a tensor, and therefore cannot be bound +// to an input of a function. +type SavedUserObject struct { + // Corresponds to a registration of the type to use in the loading program. + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // Version information from the producer of this SavedUserObject. + Version *framework.VersionDef `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // Initialization-related metadata. 
+ Metadata string `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedUserObject) Reset() { *m = SavedUserObject{} } +func (m *SavedUserObject) String() string { return proto.CompactTextString(m) } +func (*SavedUserObject) ProtoMessage() {} +func (*SavedUserObject) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{2} +} + +func (m *SavedUserObject) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedUserObject.Unmarshal(m, b) +} +func (m *SavedUserObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedUserObject.Marshal(b, m, deterministic) +} +func (m *SavedUserObject) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedUserObject.Merge(m, src) +} +func (m *SavedUserObject) XXX_Size() int { + return xxx_messageInfo_SavedUserObject.Size(m) +} +func (m *SavedUserObject) XXX_DiscardUnknown() { + xxx_messageInfo_SavedUserObject.DiscardUnknown(m) +} + +var xxx_messageInfo_SavedUserObject proto.InternalMessageInfo + +func (m *SavedUserObject) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *SavedUserObject) GetVersion() *framework.VersionDef { + if m != nil { + return m.Version + } + return nil +} + +func (m *SavedUserObject) GetMetadata() string { + if m != nil { + return m.Metadata + } + return "" +} + +// A SavedAsset points to an asset in the MetaGraph. +// +// When bound to a function this object evaluates to a tensor with the absolute +// filename. Users should not depend on a particular part of the filename to +// remain stable (e.g. basename could be changed). +type SavedAsset struct { + // Index into `MetaGraphDef.asset_file_def[]` that describes the Asset. + // + // Only the field `AssetFileDef.filename` is used. Other fields, such as + // `AssetFileDef.tensor_info`, MUST be ignored. 
+ AssetFileDefIndex int32 `protobuf:"varint,1,opt,name=asset_file_def_index,json=assetFileDefIndex,proto3" json:"asset_file_def_index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedAsset) Reset() { *m = SavedAsset{} } +func (m *SavedAsset) String() string { return proto.CompactTextString(m) } +func (*SavedAsset) ProtoMessage() {} +func (*SavedAsset) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{3} +} + +func (m *SavedAsset) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedAsset.Unmarshal(m, b) +} +func (m *SavedAsset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedAsset.Marshal(b, m, deterministic) +} +func (m *SavedAsset) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedAsset.Merge(m, src) +} +func (m *SavedAsset) XXX_Size() int { + return xxx_messageInfo_SavedAsset.Size(m) +} +func (m *SavedAsset) XXX_DiscardUnknown() { + xxx_messageInfo_SavedAsset.DiscardUnknown(m) +} + +var xxx_messageInfo_SavedAsset proto.InternalMessageInfo + +func (m *SavedAsset) GetAssetFileDefIndex() int32 { + if m != nil { + return m.AssetFileDefIndex + } + return 0 +} + +// A function with multiple signatures, possibly with non-Tensor arguments. 
+type SavedFunction struct { + ConcreteFunctions []string `protobuf:"bytes,1,rep,name=concrete_functions,json=concreteFunctions,proto3" json:"concrete_functions,omitempty"` + FunctionSpec *FunctionSpec `protobuf:"bytes,2,opt,name=function_spec,json=functionSpec,proto3" json:"function_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedFunction) Reset() { *m = SavedFunction{} } +func (m *SavedFunction) String() string { return proto.CompactTextString(m) } +func (*SavedFunction) ProtoMessage() {} +func (*SavedFunction) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{4} +} + +func (m *SavedFunction) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedFunction.Unmarshal(m, b) +} +func (m *SavedFunction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedFunction.Marshal(b, m, deterministic) +} +func (m *SavedFunction) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedFunction.Merge(m, src) +} +func (m *SavedFunction) XXX_Size() int { + return xxx_messageInfo_SavedFunction.Size(m) +} +func (m *SavedFunction) XXX_DiscardUnknown() { + xxx_messageInfo_SavedFunction.DiscardUnknown(m) +} + +var xxx_messageInfo_SavedFunction proto.InternalMessageInfo + +func (m *SavedFunction) GetConcreteFunctions() []string { + if m != nil { + return m.ConcreteFunctions + } + return nil +} + +func (m *SavedFunction) GetFunctionSpec() *FunctionSpec { + if m != nil { + return m.FunctionSpec + } + return nil +} + +// Stores low-level information about a concrete function. Referenced in either +// a SavedFunction or a SavedBareConcreteFunction. +type SavedConcreteFunction struct { + // Bound inputs to the function. The SavedObjects identified by the node ids + // given here are appended as extra inputs to the caller-supplied inputs. 
+ // The only types of SavedObjects valid here are SavedVariable, SavedResource + // and SavedAsset. + BoundInputs []int32 `protobuf:"varint,2,rep,packed,name=bound_inputs,json=boundInputs,proto3" json:"bound_inputs,omitempty"` + // Input in canonicalized form that was received to create this concrete + // function. + CanonicalizedInputSignature *StructuredValue `protobuf:"bytes,3,opt,name=canonicalized_input_signature,json=canonicalizedInputSignature,proto3" json:"canonicalized_input_signature,omitempty"` + // Output that was the return value of this function after replacing all + // Tensors with TensorSpecs. This can be an arbitrary nested function and will + // be used to reconstruct the full structure from pure tensors. + OutputSignature *StructuredValue `protobuf:"bytes,4,opt,name=output_signature,json=outputSignature,proto3" json:"output_signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedConcreteFunction) Reset() { *m = SavedConcreteFunction{} } +func (m *SavedConcreteFunction) String() string { return proto.CompactTextString(m) } +func (*SavedConcreteFunction) ProtoMessage() {} +func (*SavedConcreteFunction) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{5} +} + +func (m *SavedConcreteFunction) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedConcreteFunction.Unmarshal(m, b) +} +func (m *SavedConcreteFunction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedConcreteFunction.Marshal(b, m, deterministic) +} +func (m *SavedConcreteFunction) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedConcreteFunction.Merge(m, src) +} +func (m *SavedConcreteFunction) XXX_Size() int { + return xxx_messageInfo_SavedConcreteFunction.Size(m) +} +func (m *SavedConcreteFunction) XXX_DiscardUnknown() { + xxx_messageInfo_SavedConcreteFunction.DiscardUnknown(m) +} + +var 
xxx_messageInfo_SavedConcreteFunction proto.InternalMessageInfo + +func (m *SavedConcreteFunction) GetBoundInputs() []int32 { + if m != nil { + return m.BoundInputs + } + return nil +} + +func (m *SavedConcreteFunction) GetCanonicalizedInputSignature() *StructuredValue { + if m != nil { + return m.CanonicalizedInputSignature + } + return nil +} + +func (m *SavedConcreteFunction) GetOutputSignature() *StructuredValue { + if m != nil { + return m.OutputSignature + } + return nil +} + +type SavedBareConcreteFunction struct { + // Identifies a SavedConcreteFunction. + ConcreteFunctionName string `protobuf:"bytes,1,opt,name=concrete_function_name,json=concreteFunctionName,proto3" json:"concrete_function_name,omitempty"` + // A sequence of unique strings, one per Tensor argument. + ArgumentKeywords []string `protobuf:"bytes,2,rep,name=argument_keywords,json=argumentKeywords,proto3" json:"argument_keywords,omitempty"` + // The prefix of `argument_keywords` which may be identified by position. + AllowedPositionalArguments int64 `protobuf:"varint,3,opt,name=allowed_positional_arguments,json=allowedPositionalArguments,proto3" json:"allowed_positional_arguments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedBareConcreteFunction) Reset() { *m = SavedBareConcreteFunction{} } +func (m *SavedBareConcreteFunction) String() string { return proto.CompactTextString(m) } +func (*SavedBareConcreteFunction) ProtoMessage() {} +func (*SavedBareConcreteFunction) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{6} +} + +func (m *SavedBareConcreteFunction) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedBareConcreteFunction.Unmarshal(m, b) +} +func (m *SavedBareConcreteFunction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedBareConcreteFunction.Marshal(b, m, deterministic) +} +func (m 
*SavedBareConcreteFunction) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedBareConcreteFunction.Merge(m, src) +} +func (m *SavedBareConcreteFunction) XXX_Size() int { + return xxx_messageInfo_SavedBareConcreteFunction.Size(m) +} +func (m *SavedBareConcreteFunction) XXX_DiscardUnknown() { + xxx_messageInfo_SavedBareConcreteFunction.DiscardUnknown(m) +} + +var xxx_messageInfo_SavedBareConcreteFunction proto.InternalMessageInfo + +func (m *SavedBareConcreteFunction) GetConcreteFunctionName() string { + if m != nil { + return m.ConcreteFunctionName + } + return "" +} + +func (m *SavedBareConcreteFunction) GetArgumentKeywords() []string { + if m != nil { + return m.ArgumentKeywords + } + return nil +} + +func (m *SavedBareConcreteFunction) GetAllowedPositionalArguments() int64 { + if m != nil { + return m.AllowedPositionalArguments + } + return 0 +} + +type SavedConstant struct { + // An Operation name for a ConstantOp in this SavedObjectGraph's MetaGraph. + Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedConstant) Reset() { *m = SavedConstant{} } +func (m *SavedConstant) String() string { return proto.CompactTextString(m) } +func (*SavedConstant) ProtoMessage() {} +func (*SavedConstant) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{7} +} + +func (m *SavedConstant) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedConstant.Unmarshal(m, b) +} +func (m *SavedConstant) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedConstant.Marshal(b, m, deterministic) +} +func (m *SavedConstant) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedConstant.Merge(m, src) +} +func (m *SavedConstant) XXX_Size() int { + return xxx_messageInfo_SavedConstant.Size(m) +} +func (m *SavedConstant) XXX_DiscardUnknown() { + 
xxx_messageInfo_SavedConstant.DiscardUnknown(m) +} + +var xxx_messageInfo_SavedConstant proto.InternalMessageInfo + +func (m *SavedConstant) GetOperation() string { + if m != nil { + return m.Operation + } + return "" +} + +// Represents a Variable that is initialized by loading the contents from the +// checkpoint. +type SavedVariable struct { + Dtype framework.DataType `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + Shape *framework.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"` + Trainable bool `protobuf:"varint,3,opt,name=trainable,proto3" json:"trainable,omitempty"` + Synchronization framework.VariableSynchronization `protobuf:"varint,4,opt,name=synchronization,proto3,enum=tensorflow.VariableSynchronization" json:"synchronization,omitempty"` + Aggregation framework.VariableAggregation `protobuf:"varint,5,opt,name=aggregation,proto3,enum=tensorflow.VariableAggregation" json:"aggregation,omitempty"` + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedVariable) Reset() { *m = SavedVariable{} } +func (m *SavedVariable) String() string { return proto.CompactTextString(m) } +func (*SavedVariable) ProtoMessage() {} +func (*SavedVariable) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{8} +} + +func (m *SavedVariable) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedVariable.Unmarshal(m, b) +} +func (m *SavedVariable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedVariable.Marshal(b, m, deterministic) +} +func (m *SavedVariable) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedVariable.Merge(m, src) +} +func (m *SavedVariable) XXX_Size() int { + return xxx_messageInfo_SavedVariable.Size(m) +} +func (m *SavedVariable) XXX_DiscardUnknown() { + 
xxx_messageInfo_SavedVariable.DiscardUnknown(m) +} + +var xxx_messageInfo_SavedVariable proto.InternalMessageInfo + +func (m *SavedVariable) GetDtype() framework.DataType { + if m != nil { + return m.Dtype + } + return framework.DataType_DT_INVALID +} + +func (m *SavedVariable) GetShape() *framework.TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +func (m *SavedVariable) GetTrainable() bool { + if m != nil { + return m.Trainable + } + return false +} + +func (m *SavedVariable) GetSynchronization() framework.VariableSynchronization { + if m != nil { + return m.Synchronization + } + return framework.VariableSynchronization_VARIABLE_SYNCHRONIZATION_AUTO +} + +func (m *SavedVariable) GetAggregation() framework.VariableAggregation { + if m != nil { + return m.Aggregation + } + return framework.VariableAggregation_VARIABLE_AGGREGATION_NONE +} + +func (m *SavedVariable) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Represents `FunctionSpec` used in `Function`. This represents a +// function that has been wrapped as a TensorFlow `Function`. +type FunctionSpec struct { + // Full arg spec from inspect.getfullargspec(). + Fullargspec *StructuredValue `protobuf:"bytes,1,opt,name=fullargspec,proto3" json:"fullargspec,omitempty"` + // Whether this represents a class method. + IsMethod bool `protobuf:"varint,2,opt,name=is_method,json=isMethod,proto3" json:"is_method,omitempty"` + // The input signature, if specified. 
+ InputSignature *StructuredValue `protobuf:"bytes,5,opt,name=input_signature,json=inputSignature,proto3" json:"input_signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FunctionSpec) Reset() { *m = FunctionSpec{} } +func (m *FunctionSpec) String() string { return proto.CompactTextString(m) } +func (*FunctionSpec) ProtoMessage() {} +func (*FunctionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{9} +} + +func (m *FunctionSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FunctionSpec.Unmarshal(m, b) +} +func (m *FunctionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FunctionSpec.Marshal(b, m, deterministic) +} +func (m *FunctionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_FunctionSpec.Merge(m, src) +} +func (m *FunctionSpec) XXX_Size() int { + return xxx_messageInfo_FunctionSpec.Size(m) +} +func (m *FunctionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_FunctionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_FunctionSpec proto.InternalMessageInfo + +func (m *FunctionSpec) GetFullargspec() *StructuredValue { + if m != nil { + return m.Fullargspec + } + return nil +} + +func (m *FunctionSpec) GetIsMethod() bool { + if m != nil { + return m.IsMethod + } + return false +} + +func (m *FunctionSpec) GetInputSignature() *StructuredValue { + if m != nil { + return m.InputSignature + } + return nil +} + +// A SavedResource represents a TF object that holds state during its lifetime. +// An object of this type can have a reference to a: +// create_resource() and an initialize() function. +type SavedResource struct { + // A device specification indicating a required placement for the resource + // creation function, e.g. "CPU". An empty string allows the user to select a + // device. 
+ Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SavedResource) Reset() { *m = SavedResource{} } +func (m *SavedResource) String() string { return proto.CompactTextString(m) } +func (*SavedResource) ProtoMessage() {} +func (*SavedResource) Descriptor() ([]byte, []int) { + return fileDescriptor_4f63c49021beb5aa, []int{10} +} + +func (m *SavedResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SavedResource.Unmarshal(m, b) +} +func (m *SavedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SavedResource.Marshal(b, m, deterministic) +} +func (m *SavedResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SavedResource.Merge(m, src) +} +func (m *SavedResource) XXX_Size() int { + return xxx_messageInfo_SavedResource.Size(m) +} +func (m *SavedResource) XXX_DiscardUnknown() { + xxx_messageInfo_SavedResource.DiscardUnknown(m) +} + +var xxx_messageInfo_SavedResource proto.InternalMessageInfo + +func (m *SavedResource) GetDevice() string { + if m != nil { + return m.Device + } + return "" +} + +func init() { + proto.RegisterType((*SavedObjectGraph)(nil), "tensorflow.SavedObjectGraph") + proto.RegisterMapType((map[string]*SavedConcreteFunction)(nil), "tensorflow.SavedObjectGraph.ConcreteFunctionsEntry") + proto.RegisterType((*SavedObject)(nil), "tensorflow.SavedObject") + proto.RegisterType((*SavedUserObject)(nil), "tensorflow.SavedUserObject") + proto.RegisterType((*SavedAsset)(nil), "tensorflow.SavedAsset") + proto.RegisterType((*SavedFunction)(nil), "tensorflow.SavedFunction") + proto.RegisterType((*SavedConcreteFunction)(nil), "tensorflow.SavedConcreteFunction") + proto.RegisterType((*SavedBareConcreteFunction)(nil), "tensorflow.SavedBareConcreteFunction") + proto.RegisterType((*SavedConstant)(nil), "tensorflow.SavedConstant") + 
proto.RegisterType((*SavedVariable)(nil), "tensorflow.SavedVariable") + proto.RegisterType((*FunctionSpec)(nil), "tensorflow.FunctionSpec") + proto.RegisterType((*SavedResource)(nil), "tensorflow.SavedResource") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/saved_object_graph.proto", fileDescriptor_4f63c49021beb5aa) +} + +var fileDescriptor_4f63c49021beb5aa = []byte{ + // 1055 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xdb, 0x6e, 0x1b, 0x45, + 0x18, 0x8e, 0x0f, 0x9b, 0xda, 0xbf, 0x73, 0x70, 0x46, 0x21, 0x6c, 0x93, 0x00, 0xa9, 0x51, 0x45, + 0x04, 0xc4, 0x81, 0x14, 0x54, 0x84, 0x14, 0xd4, 0xa4, 0x21, 0xa4, 0x41, 0x85, 0x6a, 0x5c, 0x7a, + 0x05, 0x5a, 0x8d, 0x77, 0xff, 0x75, 0x96, 0xac, 0x67, 0xac, 0x99, 0xd9, 0x04, 0x57, 0x42, 0xbc, + 0x01, 0xaf, 0xc1, 0x03, 0x70, 0xcb, 0x05, 0xef, 0xc2, 0x4b, 0x70, 0x89, 0x76, 0x76, 0xd6, 0x5e, + 0x9f, 0x14, 0x7a, 0xe7, 0xf9, 0xe7, 0xfb, 0xbe, 0xf9, 0xcf, 0x6b, 0xf8, 0x54, 0x23, 0x57, 0x42, + 0x86, 0xb1, 0xb8, 0x3d, 0xf4, 0x85, 0xc4, 0xc3, 0x81, 0x14, 0x5a, 0x74, 0x93, 0xf0, 0x50, 0xb1, + 0x1b, 0x0c, 0x3c, 0xd1, 0xfd, 0x19, 0x7d, 0xed, 0xf5, 0x24, 0x1b, 0x5c, 0xb5, 0xcd, 0x1d, 0x81, + 0x31, 0x65, 0xfb, 0xf3, 0x85, 0x74, 0x2d, 0x99, 0x7f, 0xcd, 0xba, 0x31, 0xce, 0x91, 0xd8, 0x7e, + 0xb8, 0xf8, 0x55, 0x2d, 0x13, 0x5f, 0x5b, 0xd8, 0xc7, 0xd3, 0xb0, 0x50, 0xb2, 0x3e, 0xde, 0x0a, + 0x79, 0x7d, 0x98, 0xdd, 0x78, 0xea, 0x8a, 0x0d, 0x70, 0x91, 0x68, 0x01, 0x3d, 0x1c, 0xa0, 0xb2, + 0xb0, 0xfd, 0xc5, 0xb0, 0x1b, 0x94, 0x2a, 0x12, 0xfc, 0xff, 0x20, 0x99, 0x8c, 0xd2, 0xe0, 0x32, + 0x64, 0xeb, 0xf7, 0x32, 0x34, 0x3b, 0x69, 0xbe, 0xbe, 0x37, 0xb1, 0x7e, 0x93, 0x86, 0x4a, 0x0e, + 0xc0, 0xe1, 0x22, 0x40, 0xe5, 0x96, 0xf6, 0x2a, 0xfb, 0x8d, 0xa3, 0xb7, 0xdb, 0x63, 0xb9, 0x76, + 0x01, 0x4c, 0x33, 0x14, 0xe9, 0x02, 0xf1, 0x05, 0xf7, 0x25, 0x6a, 0xf4, 0xc2, 0x84, 0xfb, 0x3a, + 0xf5, 0xc4, 0x2d, 0x1b, 0xee, 0xa3, 0x05, 0x5c, 0xf3, 0x50, 0xfb, 0xa9, 0xa5, 0x9d, 
0xe7, 0xac, + 0xaf, 0xb9, 0x96, 0x43, 0xba, 0xe1, 0x4f, 0xdb, 0xb7, 0x7b, 0xb0, 0x35, 0x1f, 0x4c, 0x9a, 0x50, + 0xb9, 0xc6, 0xa1, 0x5b, 0xda, 0x2b, 0xed, 0xd7, 0x69, 0xfa, 0x93, 0x3c, 0x06, 0xe7, 0x86, 0xc5, + 0x09, 0xba, 0xe5, 0xbd, 0xd2, 0x7e, 0xe3, 0xe8, 0xc1, 0x8c, 0x0b, 0xd3, 0x4a, 0x34, 0xc3, 0x7f, + 0x59, 0xfe, 0xa2, 0xd4, 0xfa, 0xc3, 0x81, 0x46, 0xc1, 0x4f, 0xf2, 0x23, 0xd4, 0xfc, 0xab, 0x28, + 0x0e, 0x24, 0x72, 0x9b, 0x8e, 0x27, 0x45, 0xbd, 0x97, 0x79, 0xb3, 0x14, 0xc3, 0x9a, 0x32, 0xb6, + 0x6d, 0xba, 0x30, 0x44, 0x89, 0xdc, 0x47, 0x3a, 0x52, 0x24, 0x7d, 0x58, 0x53, 0xb1, 0xd0, 0x5e, + 0x5e, 0x15, 0xe5, 0x56, 0xcc, 0x1b, 0xe7, 0x6f, 0xfc, 0x46, 0x27, 0x16, 0xfa, 0x95, 0x55, 0x19, + 0xbf, 0xb4, 0xaa, 0x0a, 0x66, 0x45, 0xbe, 0x82, 0x46, 0xa2, 0x50, 0xda, 0xc6, 0x76, 0xab, 0x26, + 0x3f, 0x3b, 0x33, 0xf9, 0xf9, 0x41, 0xa1, 0xcc, 0x64, 0x2f, 0x96, 0x28, 0x24, 0xa3, 0x13, 0x69, + 0x83, 0xc3, 0x94, 0x42, 0xed, 0x3a, 0x86, 0xb9, 0x35, 0xc3, 0x3c, 0x49, 0x6f, 0x2f, 0x96, 0x68, + 0x06, 0x23, 0x8f, 0xa1, 0x96, 0x37, 0x84, 0xbb, 0x6c, 0x28, 0xf7, 0x67, 0x28, 0x79, 0x11, 0x2e, + 0x96, 0xe8, 0x08, 0x9c, 0x12, 0xf3, 0x94, 0xb8, 0xf7, 0x16, 0x10, 0xf3, 0xb0, 0x52, 0x62, 0x0e, + 0x26, 0x3f, 0xc1, 0x56, 0x97, 0x49, 0xf4, 0x66, 0x1a, 0xd2, 0xad, 0x19, 0x99, 0x87, 0x33, 0x32, + 0xa7, 0x4c, 0xe2, 0x74, 0x43, 0x5c, 0x2c, 0xd1, 0xcd, 0xee, 0x1c, 0x7b, 0xea, 0x97, 0x2f, 0xb8, + 0xd2, 0x8c, 0x6b, 0xb7, 0xbe, 0xc0, 0xaf, 0xa7, 0x16, 0x90, 0xfa, 0x95, 0x83, 0x53, 0xa2, 0x44, + 0x25, 0x12, 0xe9, 0xa3, 0x0b, 0x0b, 0x88, 0xd4, 0x02, 0x52, 0x62, 0x0e, 0x3e, 0x5d, 0x86, 0xea, + 0x75, 0xc4, 0x83, 0xcb, 0x6a, 0xad, 0xdc, 0xac, 0x50, 0x60, 0x5a, 0xcb, 0xa8, 0x9b, 0x68, 0x54, + 0xad, 0xdf, 0x60, 0x7d, 0xaa, 0x5a, 0xe4, 0x5d, 0x80, 0x28, 0x40, 0xae, 0xa3, 0x30, 0x42, 0x69, + 0x47, 0xa2, 0x60, 0x21, 0x9f, 0xc0, 0x3d, 0xbb, 0x29, 0xec, 0x6c, 0x4c, 0x54, 0xf0, 0x55, 0x76, + 0x75, 0x86, 0x21, 0xcd, 0x61, 0x64, 0x1b, 0x6a, 0x7d, 0xd4, 0x2c, 0x60, 0x9a, 0xb9, 0x15, 0xa3, + 0x37, 0x3a, 0xb7, 0x8e, 
0x01, 0xc6, 0x45, 0x27, 0x87, 0xb0, 0x69, 0x8a, 0xee, 0x85, 0x51, 0x8c, + 0x5e, 0x80, 0xa1, 0x17, 0xf1, 0x00, 0x7f, 0x31, 0x5e, 0x38, 0x74, 0xc3, 0xdc, 0x9d, 0x47, 0x31, + 0x9e, 0x61, 0xf8, 0x2c, 0xbd, 0x68, 0xfd, 0x0a, 0xab, 0x13, 0x0d, 0x40, 0x0e, 0xe6, 0xee, 0x91, + 0x74, 0xe8, 0xea, 0x73, 0x56, 0x02, 0x39, 0x86, 0xd5, 0x1c, 0xe5, 0xa9, 0x01, 0xfa, 0x36, 0x24, + 0xb7, 0x18, 0x52, 0x8e, 0xee, 0x0c, 0xd0, 0xa7, 0x2b, 0x61, 0xe1, 0xd4, 0xfa, 0xa7, 0x04, 0x6f, + 0xcd, 0xdd, 0x06, 0xe4, 0x01, 0xac, 0x74, 0x45, 0xc2, 0x03, 0x2f, 0xe2, 0x83, 0x44, 0x67, 0x9b, + 0xcc, 0xa1, 0x0d, 0x63, 0x7b, 0x66, 0x4c, 0xc4, 0x83, 0x77, 0x7c, 0xc6, 0x05, 0x8f, 0x7c, 0x16, + 0x47, 0xaf, 0xd1, 0x42, 0x3d, 0x15, 0xf5, 0x38, 0xd3, 0x89, 0x44, 0x93, 0xab, 0xe9, 0xd1, 0x32, + 0x1f, 0x88, 0x44, 0xa6, 0x9d, 0x1b, 0x27, 0x48, 0x77, 0x26, 0x14, 0x8c, 0x70, 0x27, 0xe7, 0x93, + 0x73, 0x68, 0x8a, 0x44, 0x4f, 0x6a, 0x56, 0xef, 0xd6, 0x5c, 0xcf, 0x48, 0x23, 0x9d, 0xd6, 0xdf, + 0x25, 0xb8, 0xbf, 0xb0, 0xcd, 0xc9, 0x67, 0xb0, 0x35, 0x93, 0x71, 0x8f, 0xb3, 0x3e, 0xda, 0xde, + 0xd9, 0x9c, 0xce, 0xfa, 0x77, 0xac, 0x8f, 0xe4, 0x23, 0xd8, 0x60, 0xb2, 0x97, 0xf4, 0x91, 0x6b, + 0xef, 0x1a, 0x87, 0xb7, 0x42, 0x06, 0x59, 0x92, 0xea, 0xb4, 0x99, 0x5f, 0x7c, 0x6b, 0xed, 0xe4, + 0x09, 0xec, 0xb2, 0x38, 0x16, 0xb7, 0x18, 0x78, 0x03, 0xa1, 0xa2, 0x54, 0x84, 0xc5, 0x5e, 0x0e, + 0x53, 0x26, 0x51, 0x15, 0xba, 0x6d, 0x31, 0x2f, 0x46, 0x90, 0x93, 0x1c, 0xd1, 0x3a, 0xb0, 0x7d, + 0x92, 0xcf, 0x15, 0xd9, 0x85, 0xba, 0x18, 0xa0, 0x64, 0x66, 0xac, 0x33, 0x47, 0xc7, 0x86, 0xd6, + 0x9f, 0x65, 0x8b, 0xcf, 0xf7, 0x03, 0xf9, 0x10, 0x9c, 0x20, 0xfd, 0x8e, 0x1a, 0xec, 0xda, 0xd1, + 0x66, 0x31, 0x81, 0x67, 0x4c, 0xb3, 0x97, 0xc3, 0x01, 0xd2, 0x0c, 0x42, 0x8e, 0xc0, 0x31, 0x5f, + 0x66, 0xdb, 0x4c, 0xbb, 0x13, 0x7b, 0xd8, 0xfc, 0xec, 0xa4, 0xd7, 0x2f, 0xd2, 0x8f, 0x27, 0xcd, + 0xa0, 0xa9, 0x3f, 0x5a, 0xb2, 0x88, 0x9b, 0x6d, 0x95, 0xc6, 0x53, 0xa3, 0x63, 0x03, 0x79, 0x0e, + 0xeb, 0x6a, 0xc8, 0xfd, 0x2b, 0x29, 0x78, 0xf4, 0x3a, 0xf3, 
0xb9, 0x6a, 0xfc, 0x78, 0x7f, 0x62, + 0xf6, 0xac, 0xb3, 0x9d, 0x49, 0x28, 0x9d, 0xe6, 0x92, 0x13, 0x68, 0xb0, 0x5e, 0x4f, 0x62, 0x2f, + 0x93, 0x72, 0x8c, 0xd4, 0x7b, 0xf3, 0xa4, 0x4e, 0xc6, 0x30, 0x5a, 0xe4, 0x10, 0x02, 0x55, 0x53, + 0xe3, 0x65, 0x93, 0x3a, 0xf3, 0xbb, 0xf5, 0x57, 0x09, 0x56, 0x8a, 0xc3, 0x42, 0x8e, 0xa1, 0x11, + 0x26, 0x71, 0xcc, 0x64, 0xcf, 0xcc, 0x56, 0xe9, 0xee, 0xde, 0x2b, 0xe2, 0xc9, 0x0e, 0xd4, 0x23, + 0xe5, 0xf5, 0x51, 0x5f, 0x89, 0xc0, 0xe4, 0xb2, 0x46, 0x6b, 0x91, 0x7a, 0x6e, 0xce, 0xe4, 0x0c, + 0xd6, 0xa7, 0xe7, 0xc5, 0xb9, 0x5b, 0x7f, 0x2d, 0x9a, 0x18, 0x91, 0xcb, 0x6a, 0xad, 0xd2, 0xac, + 0x5e, 0x56, 0x6b, 0xd5, 0xa6, 0xd3, 0xfa, 0xc0, 0xd6, 0x3c, 0x5f, 0xa1, 0x64, 0x0b, 0x96, 0x03, + 0xbc, 0x89, 0xfc, 0xbc, 0x93, 0xed, 0xe9, 0xb4, 0xf2, 0x6f, 0xa9, 0xd4, 0x5d, 0x36, 0xff, 0x7d, + 0x1e, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7d, 0x3f, 0xd8, 0x43, 0x0a, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/saved_object_graph.proto b/executor/proto/tensorflow/core/protobuf/saved_object_graph.proto new file mode 100644 index 0000000000..20074f3125 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/saved_object_graph.proto @@ -0,0 +1,164 @@ +syntax = "proto3"; + +import "tensorflow/core/protobuf/trackable_object_graph.proto"; +import "tensorflow/core/protobuf/struct.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; +import "tensorflow/core/framework/versions.proto"; +import "tensorflow/core/framework/variable.proto"; + +option cc_enable_arenas = true; + +package tensorflow; + +// A SavedObjectGraph is part of object-based SavedModels in TF 2.0. It +// describes the directed graph of Python objects (or equivalent in other +// languages) that make up a model, with nodes[0] at the root. 
+ +// SavedObjectGraph shares some structure with TrackableObjectGraph, but +// SavedObjectGraph belongs to the MetaGraph and contains pointers to functions +// and type information, while TrackableObjectGraph lives in the checkpoint +// and contains pointers only to variable values. + +message SavedObjectGraph { + // Flattened list of objects in the object graph. + // + // The position of the object in this list indicates its id. + // Nodes[0] is considered the root node. + repeated SavedObject nodes = 1; + + // Information about captures and output structures in concrete functions. + // Referenced from SavedBareConcreteFunction and SavedFunction. + map concrete_functions = 2; +} + +message SavedObject { + // Objects which this object depends on: named edges in the dependency + // graph. + // + // Note: currently only valid if kind == "user_object". + repeated TrackableObjectGraph.TrackableObject.ObjectReference + children = 1; + + // Removed when forking SavedObject from TrackableObjectGraph. + reserved "attributes"; + reserved 2; + + // Slot variables owned by this object. This describes the three-way + // (optimizer, variable, slot variable) relationship; none of the three + // depend on the others directly. + // + // Note: currently only valid if kind == "user_object". + repeated TrackableObjectGraph.TrackableObject.SlotVariableReference + slot_variables = 3; + + oneof kind { + SavedUserObject user_object = 4; + SavedAsset asset = 5; + SavedFunction function = 6; + SavedVariable variable = 7; + SavedBareConcreteFunction bare_concrete_function = 8; + SavedConstant constant = 9; + SavedResource resource = 10; + } +} + +// A SavedUserObject is an object (in the object-oriented language of the +// TensorFlow program) of some user- or framework-defined class other than +// those handled specifically by the other kinds of SavedObjects. +// +// This object cannot be evaluated as a tensor, and therefore cannot be bound +// to an input of a function. 
+message SavedUserObject { + // Corresponds to a registration of the type to use in the loading program. + string identifier = 1; + // Version information from the producer of this SavedUserObject. + VersionDef version = 2; + // Initialization-related metadata. + string metadata = 3; +} + +// A SavedAsset points to an asset in the MetaGraph. +// +// When bound to a function this object evaluates to a tensor with the absolute +// filename. Users should not depend on a particular part of the filename to +// remain stable (e.g. basename could be changed). +message SavedAsset { + // Index into `MetaGraphDef.asset_file_def[]` that describes the Asset. + // + // Only the field `AssetFileDef.filename` is used. Other fields, such as + // `AssetFileDef.tensor_info`, MUST be ignored. + int32 asset_file_def_index = 1; +} + +// A function with multiple signatures, possibly with non-Tensor arguments. +message SavedFunction { + repeated string concrete_functions = 1; + FunctionSpec function_spec = 2; +} + +// Stores low-level information about a concrete function. Referenced in either +// a SavedFunction or a SavedBareConcreteFunction. +message SavedConcreteFunction { + // Bound inputs to the function. The SavedObjects identified by the node ids + // given here are appended as extra inputs to the caller-supplied inputs. + // The only types of SavedObjects valid here are SavedVariable, SavedResource + // and SavedAsset. + repeated int32 bound_inputs = 2; + // Input in canonicalized form that was received to create this concrete + // function. + StructuredValue canonicalized_input_signature = 3; + // Output that was the return value of this function after replacing all + // Tensors with TensorSpecs. This can be an arbitrary nested function and will + // be used to reconstruct the full structure from pure tensors. + StructuredValue output_signature = 4; +} + +message SavedBareConcreteFunction { + // Identifies a SavedConcreteFunction. 
+ string concrete_function_name = 1; + + // A sequence of unique strings, one per Tensor argument. + repeated string argument_keywords = 2; + // The prefix of `argument_keywords` which may be identified by position. + int64 allowed_positional_arguments = 3; +} + +message SavedConstant { + // An Operation name for a ConstantOp in this SavedObjectGraph's MetaGraph. + string operation = 1; +} + +// Represents a Variable that is initialized by loading the contents from the +// checkpoint. +message SavedVariable { + DataType dtype = 1; + TensorShapeProto shape = 2; + bool trainable = 3; + VariableSynchronization synchronization = 4; + VariableAggregation aggregation = 5; + string name = 6; +} + +// Represents `FunctionSpec` used in `Function`. This represents a +// function that has been wrapped as a TensorFlow `Function`. +message FunctionSpec { + // Full arg spec from inspect.getfullargspec(). + StructuredValue fullargspec = 1; + // Whether this represents a class method. + bool is_method = 2; + // The input signature, if specified. + StructuredValue input_signature = 5; + + reserved 3, 4; +} + +// A SavedResource represents a TF object that holds state during its lifetime. +// An object of this type can have a reference to a: +// create_resource() and an initialize() function. +message SavedResource { + // A device specification indicating a required placement for the resource + // creation function, e.g. "CPU". An empty string allows the user to select a + // device. + string device = 1; +} diff --git a/executor/proto/tensorflow/core/protobuf/saver.pb.go b/executor/proto/tensorflow/core/protobuf/saver.pb.go new file mode 100644 index 0000000000..811a4598d2 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/saver.pb.go @@ -0,0 +1,190 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow/core/protobuf/saver.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A version number that identifies a different on-disk checkpoint format. +// Usually, each subclass of BaseSaverBuilder works with a particular +// version/format. However, it is possible that the same builder may be +// upgraded to support a newer checkpoint format in the future. +type SaverDef_CheckpointFormatVersion int32 + +const ( + // Internal legacy format. + SaverDef_LEGACY SaverDef_CheckpointFormatVersion = 0 + // Deprecated format: tf.Saver() which works with tensorflow::table::Table. + SaverDef_V1 SaverDef_CheckpointFormatVersion = 1 + // Current format: more efficient. + SaverDef_V2 SaverDef_CheckpointFormatVersion = 2 +) + +var SaverDef_CheckpointFormatVersion_name = map[int32]string{ + 0: "LEGACY", + 1: "V1", + 2: "V2", +} + +var SaverDef_CheckpointFormatVersion_value = map[string]int32{ + "LEGACY": 0, + "V1": 1, + "V2": 2, +} + +func (x SaverDef_CheckpointFormatVersion) String() string { + return proto.EnumName(SaverDef_CheckpointFormatVersion_name, int32(x)) +} + +func (SaverDef_CheckpointFormatVersion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5551ea1a7581c104, []int{0, 0} +} + +// Protocol buffer representing the configuration of a Saver. +type SaverDef struct { + // The name of the tensor in which to specify the filename when saving or + // restoring a model checkpoint. 
+ FilenameTensorName string `protobuf:"bytes,1,opt,name=filename_tensor_name,json=filenameTensorName,proto3" json:"filename_tensor_name,omitempty"` + // The operation to run when saving a model checkpoint. + SaveTensorName string `protobuf:"bytes,2,opt,name=save_tensor_name,json=saveTensorName,proto3" json:"save_tensor_name,omitempty"` + // The operation to run when restoring a model checkpoint. + RestoreOpName string `protobuf:"bytes,3,opt,name=restore_op_name,json=restoreOpName,proto3" json:"restore_op_name,omitempty"` + // Maximum number of checkpoints to keep. If 0, no checkpoints are deleted. + MaxToKeep int32 `protobuf:"varint,4,opt,name=max_to_keep,json=maxToKeep,proto3" json:"max_to_keep,omitempty"` + // Shard the save files, one per device that has Variable nodes. + Sharded bool `protobuf:"varint,5,opt,name=sharded,proto3" json:"sharded,omitempty"` + // How often to keep an additional checkpoint. If not specified, only the last + // "max_to_keep" checkpoints are kept; if specified, in addition to keeping + // the last "max_to_keep" checkpoints, an additional checkpoint will be kept + // for every n hours of training. 
+ KeepCheckpointEveryNHours float32 `protobuf:"fixed32,6,opt,name=keep_checkpoint_every_n_hours,json=keepCheckpointEveryNHours,proto3" json:"keep_checkpoint_every_n_hours,omitempty"` + Version SaverDef_CheckpointFormatVersion `protobuf:"varint,7,opt,name=version,proto3,enum=tensorflow.SaverDef_CheckpointFormatVersion" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SaverDef) Reset() { *m = SaverDef{} } +func (m *SaverDef) String() string { return proto.CompactTextString(m) } +func (*SaverDef) ProtoMessage() {} +func (*SaverDef) Descriptor() ([]byte, []int) { + return fileDescriptor_5551ea1a7581c104, []int{0} +} + +func (m *SaverDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SaverDef.Unmarshal(m, b) +} +func (m *SaverDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SaverDef.Marshal(b, m, deterministic) +} +func (m *SaverDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_SaverDef.Merge(m, src) +} +func (m *SaverDef) XXX_Size() int { + return xxx_messageInfo_SaverDef.Size(m) +} +func (m *SaverDef) XXX_DiscardUnknown() { + xxx_messageInfo_SaverDef.DiscardUnknown(m) +} + +var xxx_messageInfo_SaverDef proto.InternalMessageInfo + +func (m *SaverDef) GetFilenameTensorName() string { + if m != nil { + return m.FilenameTensorName + } + return "" +} + +func (m *SaverDef) GetSaveTensorName() string { + if m != nil { + return m.SaveTensorName + } + return "" +} + +func (m *SaverDef) GetRestoreOpName() string { + if m != nil { + return m.RestoreOpName + } + return "" +} + +func (m *SaverDef) GetMaxToKeep() int32 { + if m != nil { + return m.MaxToKeep + } + return 0 +} + +func (m *SaverDef) GetSharded() bool { + if m != nil { + return m.Sharded + } + return false +} + +func (m *SaverDef) GetKeepCheckpointEveryNHours() float32 { + if m != nil { + return m.KeepCheckpointEveryNHours + } + return 0 +} + +func (m 
*SaverDef) GetVersion() SaverDef_CheckpointFormatVersion { + if m != nil { + return m.Version + } + return SaverDef_LEGACY +} + +func init() { + proto.RegisterEnum("tensorflow.SaverDef_CheckpointFormatVersion", SaverDef_CheckpointFormatVersion_name, SaverDef_CheckpointFormatVersion_value) + proto.RegisterType((*SaverDef)(nil), "tensorflow.SaverDef") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/saver.proto", fileDescriptor_5551ea1a7581c104) +} + +var fileDescriptor_5551ea1a7581c104 = []byte{ + // 362 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x8f, 0xd2, 0x40, + 0x14, 0xc6, 0x9d, 0x22, 0x05, 0x86, 0x88, 0xcd, 0x68, 0x62, 0x3d, 0x68, 0x1a, 0x62, 0x4c, 0x0f, + 0xa6, 0x55, 0x8c, 0x37, 0x0f, 0x0a, 0x82, 0x26, 0x1a, 0x24, 0x95, 0x90, 0xb8, 0x97, 0x49, 0x29, + 0xaf, 0xb4, 0x81, 0xf6, 0x35, 0xd3, 0x29, 0xcb, 0xfe, 0x09, 0xfb, 0x1f, 0xef, 0x71, 0x33, 0xc3, + 0x76, 0x81, 0x64, 0xf7, 0xd4, 0xf7, 0xbd, 0xef, 0xf7, 0xf5, 0x25, 0xef, 0x0d, 0x7d, 0x27, 0x21, + 0x2f, 0x51, 0xc4, 0x5b, 0xbc, 0xf4, 0x23, 0x14, 0xe0, 0x17, 0x02, 0x25, 0x2e, 0xab, 0xd8, 0x2f, + 0xc3, 0x1d, 0x08, 0x4f, 0x4b, 0x46, 0x8f, 0x54, 0xff, 0xba, 0x41, 0xdb, 0xff, 0x94, 0xf7, 0x03, + 0x62, 0xf6, 0x91, 0xbe, 0x8c, 0xd3, 0x2d, 0xe4, 0x61, 0x06, 0xfc, 0xc0, 0x70, 0x55, 0xdb, 0xc4, + 0x21, 0x6e, 0x27, 0x60, 0xb5, 0x37, 0xd7, 0xd6, 0x34, 0xcc, 0x80, 0xb9, 0xd4, 0x52, 0x7f, 0x3e, + 0xa3, 0x0d, 0x4d, 0xf7, 0x54, 0xff, 0x84, 0x7c, 0x4f, 0x9f, 0x0b, 0x28, 0x25, 0x0a, 0xe0, 0x58, + 0x1c, 0xc0, 0x86, 0x06, 0x9f, 0xdd, 0xb5, 0xff, 0x16, 0x9a, 0x7b, 0x4b, 0xbb, 0x59, 0xb8, 0xe7, + 0x12, 0xf9, 0x06, 0xa0, 0xb0, 0x9f, 0x3a, 0xc4, 0x6d, 0x06, 0x9d, 0x2c, 0xdc, 0xcf, 0xf1, 0x37, + 0x40, 0xc1, 0x6c, 0xda, 0x2a, 0x93, 0x50, 0xac, 0x60, 0x65, 0x37, 0x1d, 0xe2, 0xb6, 0x83, 0x5a, + 0xb2, 0x6f, 0xf4, 0x8d, 0x8a, 0xf0, 0x28, 0x81, 0x68, 0x53, 0x60, 0x9a, 0x4b, 0x0e, 0x3b, 0x10, + 0x57, 0x3c, 0xe7, 0x09, 0x56, 0xa2, 0xb4, 0x4d, 0x87, 0xb8, 
0x46, 0xf0, 0x5a, 0x41, 0xa3, 0x7b, + 0x66, 0xac, 0x90, 0xe9, 0x2f, 0x05, 0xb0, 0x09, 0x6d, 0xed, 0x40, 0x94, 0x29, 0xe6, 0x76, 0xcb, + 0x21, 0x6e, 0x6f, 0xf0, 0xc1, 0x3b, 0xae, 0xca, 0xab, 0xd7, 0xe4, 0x1d, 0xc3, 0x13, 0x14, 0x59, + 0x28, 0x17, 0x87, 0x4c, 0x50, 0x87, 0xfb, 0x5f, 0xe8, 0xab, 0x47, 0x18, 0x46, 0xa9, 0xf9, 0x67, + 0xfc, 0xf3, 0xfb, 0xe8, 0xbf, 0xf5, 0x84, 0x99, 0xd4, 0x58, 0x7c, 0xb2, 0x88, 0xfe, 0x0e, 0x2c, + 0x63, 0x08, 0xf4, 0x05, 0x8a, 0xf5, 0xe9, 0xc8, 0x4a, 0xa6, 0xdb, 0x61, 0x57, 0x0f, 0x9e, 0xa9, + 0xd3, 0x95, 0x33, 0x72, 0xf1, 0x75, 0x9d, 0xca, 0xa4, 0x5a, 0x7a, 0x11, 0x66, 0xfe, 0xc9, 0xb9, + 0x1f, 0x2e, 0xd7, 0x78, 0xfe, 0x0e, 0x6e, 0x08, 0x59, 0x9a, 0x5a, 0x7c, 0xbe, 0x0d, 0x00, 0x00, + 0xff, 0xff, 0x52, 0xc9, 0x64, 0xa0, 0x2d, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/saver.proto b/executor/proto/tensorflow/core/protobuf/saver.proto new file mode 100644 index 0000000000..4245386145 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/saver.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "SaverProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.util"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; + +// Protocol buffer representing the configuration of a Saver. +message SaverDef { + // The name of the tensor in which to specify the filename when saving or + // restoring a model checkpoint. + string filename_tensor_name = 1; + + // The operation to run when saving a model checkpoint. + string save_tensor_name = 2; + + // The operation to run when restoring a model checkpoint. + string restore_op_name = 3; + + // Maximum number of checkpoints to keep. If 0, no checkpoints are deleted. + int32 max_to_keep = 4; + + // Shard the save files, one per device that has Variable nodes. + bool sharded = 5; + + // How often to keep an additional checkpoint. 
If not specified, only the last + // "max_to_keep" checkpoints are kept; if specified, in addition to keeping + // the last "max_to_keep" checkpoints, an additional checkpoint will be kept + // for every n hours of training. + float keep_checkpoint_every_n_hours = 6; + + // A version number that identifies a different on-disk checkpoint format. + // Usually, each subclass of BaseSaverBuilder works with a particular + // version/format. However, it is possible that the same builder may be + // upgraded to support a newer checkpoint format in the future. + enum CheckpointFormatVersion { + // Internal legacy format. + LEGACY = 0; + // Deprecated format: tf.Saver() which works with tensorflow::table::Table. + V1 = 1; + // Current format: more efficient. + V2 = 2; + } + CheckpointFormatVersion version = 7; +} diff --git a/executor/proto/tensorflow/core/protobuf/struct.pb.go b/executor/proto/tensorflow/core/protobuf/struct.pb.go new file mode 100644 index 0000000000..748b8821cd --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/struct.pb.go @@ -0,0 +1,778 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/struct.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type TypeSpecProto_TypeSpecClass int32 + +const ( + TypeSpecProto_UNKNOWN TypeSpecProto_TypeSpecClass = 0 + TypeSpecProto_SPARSE_TENSOR_SPEC TypeSpecProto_TypeSpecClass = 1 + TypeSpecProto_INDEXED_SLICES_SPEC TypeSpecProto_TypeSpecClass = 2 + TypeSpecProto_RAGGED_TENSOR_SPEC TypeSpecProto_TypeSpecClass = 3 + TypeSpecProto_TENSOR_ARRAY_SPEC TypeSpecProto_TypeSpecClass = 4 + TypeSpecProto_DATA_DATASET_SPEC TypeSpecProto_TypeSpecClass = 5 + TypeSpecProto_DATA_ITERATOR_SPEC TypeSpecProto_TypeSpecClass = 6 + TypeSpecProto_OPTIONAL_SPEC TypeSpecProto_TypeSpecClass = 7 + TypeSpecProto_PER_REPLICA_SPEC TypeSpecProto_TypeSpecClass = 8 +) + +var TypeSpecProto_TypeSpecClass_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SPARSE_TENSOR_SPEC", + 2: "INDEXED_SLICES_SPEC", + 3: "RAGGED_TENSOR_SPEC", + 4: "TENSOR_ARRAY_SPEC", + 5: "DATA_DATASET_SPEC", + 6: "DATA_ITERATOR_SPEC", + 7: "OPTIONAL_SPEC", + 8: "PER_REPLICA_SPEC", +} + +var TypeSpecProto_TypeSpecClass_value = map[string]int32{ + "UNKNOWN": 0, + "SPARSE_TENSOR_SPEC": 1, + "INDEXED_SLICES_SPEC": 2, + "RAGGED_TENSOR_SPEC": 3, + "TENSOR_ARRAY_SPEC": 4, + "DATA_DATASET_SPEC": 5, + "DATA_ITERATOR_SPEC": 6, + "OPTIONAL_SPEC": 7, + "PER_REPLICA_SPEC": 8, +} + +func (x TypeSpecProto_TypeSpecClass) String() string { + return proto.EnumName(TypeSpecProto_TypeSpecClass_name, int32(x)) +} + +func (TypeSpecProto_TypeSpecClass) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8f6f8fd91d5fa722, []int{8, 0} +} + +// `StructuredValue` represents a dynamically typed value representing various +// data structures that are inspired by Python data structures typically used in +// TensorFlow functions as inputs and outputs. +// +// For example when saving a Layer there may be a `training` argument. If the +// user passes a boolean True/False, that switches between two concrete +// TensorFlow functions. 
In order to switch between them in the same way after +// loading the SavedModel, we need to represent "True" and "False". +// +// A more advanced example might be a function which takes a list of +// dictionaries mapping from strings to Tensors. In order to map from +// user-specified arguments `[{"a": tf.constant(1.)}, {"q": tf.constant(3.)}]` +// after load to the right saved TensorFlow function, we need to represent the +// nested structure and the strings, recording that we have a trace for anything +// matching `[{"a": tf.TensorSpec(None, tf.float32)}, {"q": tf.TensorSpec([], +// tf.float64)}]` as an example. +// +// Likewise functions may return nested structures of Tensors, for example +// returning a dictionary mapping from strings to Tensors. In order for the +// loaded function to return the same structure we need to serialize it. +// +// This is an ergonomic aid for working with loaded SavedModels, not a promise +// to serialize all possible function signatures. For example we do not expect +// to pickle generic Python objects, and ideally we'd stay language-agnostic. +type StructuredValue struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *StructuredValue_NoneValue + // *StructuredValue_Float64Value + // *StructuredValue_Int64Value + // *StructuredValue_StringValue + // *StructuredValue_BoolValue + // *StructuredValue_TensorShapeValue + // *StructuredValue_TensorDtypeValue + // *StructuredValue_TensorSpecValue + // *StructuredValue_TypeSpecValue + // *StructuredValue_ListValue + // *StructuredValue_TupleValue + // *StructuredValue_DictValue + // *StructuredValue_NamedTupleValue + Kind isStructuredValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StructuredValue) Reset() { *m = StructuredValue{} } +func (m *StructuredValue) String() string { return proto.CompactTextString(m) } +func (*StructuredValue) ProtoMessage() {} +func (*StructuredValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8f6f8fd91d5fa722, []int{0} +} + +func (m *StructuredValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StructuredValue.Unmarshal(m, b) +} +func (m *StructuredValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StructuredValue.Marshal(b, m, deterministic) +} +func (m *StructuredValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StructuredValue.Merge(m, src) +} +func (m *StructuredValue) XXX_Size() int { + return xxx_messageInfo_StructuredValue.Size(m) +} +func (m *StructuredValue) XXX_DiscardUnknown() { + xxx_messageInfo_StructuredValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StructuredValue proto.InternalMessageInfo + +type isStructuredValue_Kind interface { + isStructuredValue_Kind() +} + +type StructuredValue_NoneValue struct { + NoneValue *NoneValue `protobuf:"bytes,1,opt,name=none_value,json=noneValue,proto3,oneof"` +} + +type StructuredValue_Float64Value struct { + Float64Value float64 `protobuf:"fixed64,11,opt,name=float64_value,json=float64Value,proto3,oneof"` +} + 
+type StructuredValue_Int64Value struct { + Int64Value int64 `protobuf:"zigzag64,12,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type StructuredValue_StringValue struct { + StringValue string `protobuf:"bytes,13,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type StructuredValue_BoolValue struct { + BoolValue bool `protobuf:"varint,14,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type StructuredValue_TensorShapeValue struct { + TensorShapeValue *framework.TensorShapeProto `protobuf:"bytes,31,opt,name=tensor_shape_value,json=tensorShapeValue,proto3,oneof"` +} + +type StructuredValue_TensorDtypeValue struct { + TensorDtypeValue framework.DataType `protobuf:"varint,32,opt,name=tensor_dtype_value,json=tensorDtypeValue,proto3,enum=tensorflow.DataType,oneof"` +} + +type StructuredValue_TensorSpecValue struct { + TensorSpecValue *TensorSpecProto `protobuf:"bytes,33,opt,name=tensor_spec_value,json=tensorSpecValue,proto3,oneof"` +} + +type StructuredValue_TypeSpecValue struct { + TypeSpecValue *TypeSpecProto `protobuf:"bytes,34,opt,name=type_spec_value,json=typeSpecValue,proto3,oneof"` +} + +type StructuredValue_ListValue struct { + ListValue *ListValue `protobuf:"bytes,51,opt,name=list_value,json=listValue,proto3,oneof"` +} + +type StructuredValue_TupleValue struct { + TupleValue *TupleValue `protobuf:"bytes,52,opt,name=tuple_value,json=tupleValue,proto3,oneof"` +} + +type StructuredValue_DictValue struct { + DictValue *DictValue `protobuf:"bytes,53,opt,name=dict_value,json=dictValue,proto3,oneof"` +} + +type StructuredValue_NamedTupleValue struct { + NamedTupleValue *NamedTupleValue `protobuf:"bytes,54,opt,name=named_tuple_value,json=namedTupleValue,proto3,oneof"` +} + +func (*StructuredValue_NoneValue) isStructuredValue_Kind() {} + +func (*StructuredValue_Float64Value) isStructuredValue_Kind() {} + +func (*StructuredValue_Int64Value) isStructuredValue_Kind() {} + +func (*StructuredValue_StringValue) isStructuredValue_Kind() {} + +func 
(*StructuredValue_BoolValue) isStructuredValue_Kind() {} + +func (*StructuredValue_TensorShapeValue) isStructuredValue_Kind() {} + +func (*StructuredValue_TensorDtypeValue) isStructuredValue_Kind() {} + +func (*StructuredValue_TensorSpecValue) isStructuredValue_Kind() {} + +func (*StructuredValue_TypeSpecValue) isStructuredValue_Kind() {} + +func (*StructuredValue_ListValue) isStructuredValue_Kind() {} + +func (*StructuredValue_TupleValue) isStructuredValue_Kind() {} + +func (*StructuredValue_DictValue) isStructuredValue_Kind() {} + +func (*StructuredValue_NamedTupleValue) isStructuredValue_Kind() {} + +func (m *StructuredValue) GetKind() isStructuredValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *StructuredValue) GetNoneValue() *NoneValue { + if x, ok := m.GetKind().(*StructuredValue_NoneValue); ok { + return x.NoneValue + } + return nil +} + +func (m *StructuredValue) GetFloat64Value() float64 { + if x, ok := m.GetKind().(*StructuredValue_Float64Value); ok { + return x.Float64Value + } + return 0 +} + +func (m *StructuredValue) GetInt64Value() int64 { + if x, ok := m.GetKind().(*StructuredValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *StructuredValue) GetStringValue() string { + if x, ok := m.GetKind().(*StructuredValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *StructuredValue) GetBoolValue() bool { + if x, ok := m.GetKind().(*StructuredValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *StructuredValue) GetTensorShapeValue() *framework.TensorShapeProto { + if x, ok := m.GetKind().(*StructuredValue_TensorShapeValue); ok { + return x.TensorShapeValue + } + return nil +} + +func (m *StructuredValue) GetTensorDtypeValue() framework.DataType { + if x, ok := m.GetKind().(*StructuredValue_TensorDtypeValue); ok { + return x.TensorDtypeValue + } + return framework.DataType_DT_INVALID +} + +func (m *StructuredValue) GetTensorSpecValue() *TensorSpecProto { + 
if x, ok := m.GetKind().(*StructuredValue_TensorSpecValue); ok { + return x.TensorSpecValue + } + return nil +} + +func (m *StructuredValue) GetTypeSpecValue() *TypeSpecProto { + if x, ok := m.GetKind().(*StructuredValue_TypeSpecValue); ok { + return x.TypeSpecValue + } + return nil +} + +func (m *StructuredValue) GetListValue() *ListValue { + if x, ok := m.GetKind().(*StructuredValue_ListValue); ok { + return x.ListValue + } + return nil +} + +func (m *StructuredValue) GetTupleValue() *TupleValue { + if x, ok := m.GetKind().(*StructuredValue_TupleValue); ok { + return x.TupleValue + } + return nil +} + +func (m *StructuredValue) GetDictValue() *DictValue { + if x, ok := m.GetKind().(*StructuredValue_DictValue); ok { + return x.DictValue + } + return nil +} + +func (m *StructuredValue) GetNamedTupleValue() *NamedTupleValue { + if x, ok := m.GetKind().(*StructuredValue_NamedTupleValue); ok { + return x.NamedTupleValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*StructuredValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*StructuredValue_NoneValue)(nil), + (*StructuredValue_Float64Value)(nil), + (*StructuredValue_Int64Value)(nil), + (*StructuredValue_StringValue)(nil), + (*StructuredValue_BoolValue)(nil), + (*StructuredValue_TensorShapeValue)(nil), + (*StructuredValue_TensorDtypeValue)(nil), + (*StructuredValue_TensorSpecValue)(nil), + (*StructuredValue_TypeSpecValue)(nil), + (*StructuredValue_ListValue)(nil), + (*StructuredValue_TupleValue)(nil), + (*StructuredValue_DictValue)(nil), + (*StructuredValue_NamedTupleValue)(nil), + } +} + +// Represents None. 
+type NoneValue struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NoneValue) Reset() { *m = NoneValue{} } +func (m *NoneValue) String() string { return proto.CompactTextString(m) } +func (*NoneValue) ProtoMessage() {} +func (*NoneValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8f6f8fd91d5fa722, []int{1} +} + +func (m *NoneValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NoneValue.Unmarshal(m, b) +} +func (m *NoneValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NoneValue.Marshal(b, m, deterministic) +} +func (m *NoneValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_NoneValue.Merge(m, src) +} +func (m *NoneValue) XXX_Size() int { + return xxx_messageInfo_NoneValue.Size(m) +} +func (m *NoneValue) XXX_DiscardUnknown() { + xxx_messageInfo_NoneValue.DiscardUnknown(m) +} + +var xxx_messageInfo_NoneValue proto.InternalMessageInfo + +// Represents a Python list. 
+type ListValue struct { + Values []*StructuredValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8f6f8fd91d5fa722, []int{2} +} + +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*StructuredValue { + if m != nil { + return m.Values + } + return nil +} + +// Represents a Python tuple. 
+type TupleValue struct { + Values []*StructuredValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TupleValue) Reset() { *m = TupleValue{} } +func (m *TupleValue) String() string { return proto.CompactTextString(m) } +func (*TupleValue) ProtoMessage() {} +func (*TupleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8f6f8fd91d5fa722, []int{3} +} + +func (m *TupleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TupleValue.Unmarshal(m, b) +} +func (m *TupleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TupleValue.Marshal(b, m, deterministic) +} +func (m *TupleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_TupleValue.Merge(m, src) +} +func (m *TupleValue) XXX_Size() int { + return xxx_messageInfo_TupleValue.Size(m) +} +func (m *TupleValue) XXX_DiscardUnknown() { + xxx_messageInfo_TupleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_TupleValue proto.InternalMessageInfo + +func (m *TupleValue) GetValues() []*StructuredValue { + if m != nil { + return m.Values + } + return nil +} + +// Represents a Python dict keyed by `str`. +// The comment on Unicode from Value.string_value applies analogously. 
+type DictValue struct { + Fields map[string]*StructuredValue `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DictValue) Reset() { *m = DictValue{} } +func (m *DictValue) String() string { return proto.CompactTextString(m) } +func (*DictValue) ProtoMessage() {} +func (*DictValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8f6f8fd91d5fa722, []int{4} +} + +func (m *DictValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DictValue.Unmarshal(m, b) +} +func (m *DictValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DictValue.Marshal(b, m, deterministic) +} +func (m *DictValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DictValue.Merge(m, src) +} +func (m *DictValue) XXX_Size() int { + return xxx_messageInfo_DictValue.Size(m) +} +func (m *DictValue) XXX_DiscardUnknown() { + xxx_messageInfo_DictValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DictValue proto.InternalMessageInfo + +func (m *DictValue) GetFields() map[string]*StructuredValue { + if m != nil { + return m.Fields + } + return nil +} + +// Represents a (key, value) pair. 
+type PairValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *StructuredValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PairValue) Reset() { *m = PairValue{} } +func (m *PairValue) String() string { return proto.CompactTextString(m) } +func (*PairValue) ProtoMessage() {} +func (*PairValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8f6f8fd91d5fa722, []int{5} +} + +func (m *PairValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PairValue.Unmarshal(m, b) +} +func (m *PairValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PairValue.Marshal(b, m, deterministic) +} +func (m *PairValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_PairValue.Merge(m, src) +} +func (m *PairValue) XXX_Size() int { + return xxx_messageInfo_PairValue.Size(m) +} +func (m *PairValue) XXX_DiscardUnknown() { + xxx_messageInfo_PairValue.DiscardUnknown(m) +} + +var xxx_messageInfo_PairValue proto.InternalMessageInfo + +func (m *PairValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *PairValue) GetValue() *StructuredValue { + if m != nil { + return m.Value + } + return nil +} + +// Represents Python's namedtuple. 
+type NamedTupleValue struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Values []*PairValue `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamedTupleValue) Reset() { *m = NamedTupleValue{} } +func (m *NamedTupleValue) String() string { return proto.CompactTextString(m) } +func (*NamedTupleValue) ProtoMessage() {} +func (*NamedTupleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8f6f8fd91d5fa722, []int{6} +} + +func (m *NamedTupleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NamedTupleValue.Unmarshal(m, b) +} +func (m *NamedTupleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NamedTupleValue.Marshal(b, m, deterministic) +} +func (m *NamedTupleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedTupleValue.Merge(m, src) +} +func (m *NamedTupleValue) XXX_Size() int { + return xxx_messageInfo_NamedTupleValue.Size(m) +} +func (m *NamedTupleValue) XXX_DiscardUnknown() { + xxx_messageInfo_NamedTupleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedTupleValue proto.InternalMessageInfo + +func (m *NamedTupleValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedTupleValue) GetValues() []*PairValue { + if m != nil { + return m.Values + } + return nil +} + +// A protobuf to tf.TensorSpec. 
+type TensorSpecProto struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Shape *framework.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"` + Dtype framework.DataType `protobuf:"varint,3,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TensorSpecProto) Reset() { *m = TensorSpecProto{} } +func (m *TensorSpecProto) String() string { return proto.CompactTextString(m) } +func (*TensorSpecProto) ProtoMessage() {} +func (*TensorSpecProto) Descriptor() ([]byte, []int) { + return fileDescriptor_8f6f8fd91d5fa722, []int{7} +} + +func (m *TensorSpecProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TensorSpecProto.Unmarshal(m, b) +} +func (m *TensorSpecProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TensorSpecProto.Marshal(b, m, deterministic) +} +func (m *TensorSpecProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_TensorSpecProto.Merge(m, src) +} +func (m *TensorSpecProto) XXX_Size() int { + return xxx_messageInfo_TensorSpecProto.Size(m) +} +func (m *TensorSpecProto) XXX_DiscardUnknown() { + xxx_messageInfo_TensorSpecProto.DiscardUnknown(m) +} + +var xxx_messageInfo_TensorSpecProto proto.InternalMessageInfo + +func (m *TensorSpecProto) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TensorSpecProto) GetShape() *framework.TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +func (m *TensorSpecProto) GetDtype() framework.DataType { + if m != nil { + return m.Dtype + } + return framework.DataType_DT_INVALID +} + +// Represents a tf.TypeSpec +type TypeSpecProto struct { + TypeSpecClass TypeSpecProto_TypeSpecClass `protobuf:"varint,1,opt,name=type_spec_class,json=typeSpecClass,proto3,enum=tensorflow.TypeSpecProto_TypeSpecClass" 
json:"type_spec_class,omitempty"` + // The value returned by TypeSpec._serialize(). + TypeState *StructuredValue `protobuf:"bytes,2,opt,name=type_state,json=typeState,proto3" json:"type_state,omitempty"` + // This is currently redundant with the type_spec_class enum, and is only + // used for error reporting. In particular, if you use an older binary to + // load a newer model, and the model uses a TypeSpecClass that the older + // binary doesn't support, then this lets us display a useful error message. + TypeSpecClassName string `protobuf:"bytes,3,opt,name=type_spec_class_name,json=typeSpecClassName,proto3" json:"type_spec_class_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TypeSpecProto) Reset() { *m = TypeSpecProto{} } +func (m *TypeSpecProto) String() string { return proto.CompactTextString(m) } +func (*TypeSpecProto) ProtoMessage() {} +func (*TypeSpecProto) Descriptor() ([]byte, []int) { + return fileDescriptor_8f6f8fd91d5fa722, []int{8} +} + +func (m *TypeSpecProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TypeSpecProto.Unmarshal(m, b) +} +func (m *TypeSpecProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TypeSpecProto.Marshal(b, m, deterministic) +} +func (m *TypeSpecProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeSpecProto.Merge(m, src) +} +func (m *TypeSpecProto) XXX_Size() int { + return xxx_messageInfo_TypeSpecProto.Size(m) +} +func (m *TypeSpecProto) XXX_DiscardUnknown() { + xxx_messageInfo_TypeSpecProto.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeSpecProto proto.InternalMessageInfo + +func (m *TypeSpecProto) GetTypeSpecClass() TypeSpecProto_TypeSpecClass { + if m != nil { + return m.TypeSpecClass + } + return TypeSpecProto_UNKNOWN +} + +func (m *TypeSpecProto) GetTypeState() *StructuredValue { + if m != nil { + return m.TypeState + } + return nil +} + +func (m *TypeSpecProto) 
GetTypeSpecClassName() string { + if m != nil { + return m.TypeSpecClassName + } + return "" +} + +func init() { + proto.RegisterEnum("tensorflow.TypeSpecProto_TypeSpecClass", TypeSpecProto_TypeSpecClass_name, TypeSpecProto_TypeSpecClass_value) + proto.RegisterType((*StructuredValue)(nil), "tensorflow.StructuredValue") + proto.RegisterType((*NoneValue)(nil), "tensorflow.NoneValue") + proto.RegisterType((*ListValue)(nil), "tensorflow.ListValue") + proto.RegisterType((*TupleValue)(nil), "tensorflow.TupleValue") + proto.RegisterType((*DictValue)(nil), "tensorflow.DictValue") + proto.RegisterMapType((map[string]*StructuredValue)(nil), "tensorflow.DictValue.FieldsEntry") + proto.RegisterType((*PairValue)(nil), "tensorflow.PairValue") + proto.RegisterType((*NamedTupleValue)(nil), "tensorflow.NamedTupleValue") + proto.RegisterType((*TensorSpecProto)(nil), "tensorflow.TensorSpecProto") + proto.RegisterType((*TypeSpecProto)(nil), "tensorflow.TypeSpecProto") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/struct.proto", fileDescriptor_8f6f8fd91d5fa722) +} + +var fileDescriptor_8f6f8fd91d5fa722 = []byte{ + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x5d, 0x6f, 0xe3, 0x44, + 0x14, 0x8d, 0x9b, 0x26, 0xbb, 0xbe, 0x6e, 0x9a, 0x74, 0xe8, 0x2e, 0xa5, 0x20, 0xad, 0x6b, 0x54, + 0x11, 0x21, 0x68, 0x44, 0xba, 0x54, 0xec, 0x3e, 0x61, 0x62, 0xb3, 0x89, 0x88, 0x1c, 0x6b, 0x6c, + 0x16, 0x78, 0xb2, 0xdc, 0x64, 0x02, 0x56, 0xbd, 0xb6, 0x65, 0x4f, 0x58, 0xe5, 0x07, 0xf0, 0x37, + 0x78, 0xe2, 0x27, 0xf1, 0x6f, 0x78, 0x41, 0x33, 0xe3, 0xcf, 0xa4, 0x5b, 0x21, 0xb4, 0x2f, 0x95, + 0xef, 0xf1, 0xb9, 0xe7, 0xdc, 0x39, 0x93, 0xeb, 0xc2, 0x25, 0x25, 0x51, 0x16, 0xa7, 0xeb, 0x30, + 0x7e, 0x3b, 0x5a, 0xc6, 0x29, 0x19, 0x25, 0x69, 0x4c, 0xe3, 0xdb, 0xcd, 0x7a, 0x94, 0xd1, 0x74, + 0xb3, 0xa4, 0x57, 0xbc, 0x46, 0x50, 0xd1, 0xce, 0xbf, 0xd8, 0x6d, 0x59, 0xa7, 0xfe, 0x1b, 0xf2, + 0x36, 0x4e, 0xef, 0x46, 0xe2, 0x8d, 
0x97, 0xfd, 0xe6, 0x27, 0x44, 0x74, 0x9e, 0x5f, 0x3e, 0xc0, + 0xde, 0x26, 0x24, 0x13, 0x34, 0xed, 0x9f, 0x0e, 0xf4, 0x1d, 0xee, 0xb8, 0x49, 0xc9, 0xea, 0xb5, + 0x1f, 0x6e, 0x08, 0xba, 0x01, 0x88, 0xe2, 0x88, 0x78, 0xbf, 0xb3, 0xea, 0x4c, 0x52, 0xa5, 0xa1, + 0x32, 0x7e, 0x72, 0x55, 0xe9, 0x5d, 0x59, 0x71, 0x44, 0x38, 0x75, 0xda, 0xc2, 0x72, 0x54, 0x14, + 0xe8, 0x12, 0x7a, 0xeb, 0x30, 0xf6, 0xe9, 0xcd, 0xf3, 0xbc, 0x55, 0x51, 0xa5, 0xa1, 0x34, 0x6d, + 0xe1, 0xa3, 0x1c, 0x16, 0xb4, 0x0b, 0x50, 0x82, 0xa8, 0x22, 0x1d, 0xa9, 0xd2, 0x10, 0x4d, 0x5b, + 0x18, 0x38, 0x28, 0x28, 0x9f, 0xc2, 0x51, 0x46, 0xd3, 0x20, 0xfa, 0x35, 0xe7, 0xf4, 0x54, 0x69, + 0x28, 0x4f, 0x5b, 0x58, 0x11, 0xa8, 0x20, 0x3d, 0x03, 0xb8, 0x8d, 0xe3, 0x30, 0xa7, 0x1c, 0xab, + 0xd2, 0xf0, 0x31, 0x9b, 0x87, 0x61, 0x82, 0x30, 0x07, 0x54, 0x0f, 0x26, 0x27, 0x3e, 0xe3, 0xe7, + 0xf9, 0xa4, 0x7e, 0x1e, 0x97, 0x3f, 0x3a, 0x8c, 0x64, 0xb3, 0x54, 0xa6, 0x2d, 0x3c, 0xa0, 0x15, + 0x26, 0xd4, 0x8c, 0x52, 0x6d, 0xc5, 0x02, 0xcc, 0xd5, 0x54, 0x55, 0x1a, 0x1e, 0x8f, 0x4f, 0xeb, + 0x6a, 0x86, 0x4f, 0x7d, 0x77, 0x9b, 0x90, 0x4a, 0xc5, 0x60, 0x0d, 0x42, 0x65, 0x06, 0x27, 0xc5, + 0x4c, 0x09, 0x59, 0xe6, 0x22, 0x17, 0x7c, 0xa4, 0x8f, 0xef, 0x19, 0x29, 0x21, 0xcb, 0x62, 0xa2, + 0x3e, 0x2d, 0x21, 0x21, 0x35, 0x81, 0x3e, 0x1f, 0xa4, 0x26, 0xa4, 0x71, 0xa1, 0x8f, 0x1a, 0x42, + 0xdb, 0x84, 0xd4, 0x65, 0x7a, 0x34, 0x07, 0xca, 0xbb, 0x0e, 0x83, 0x8c, 0xe6, 0xfd, 0xd7, 0xfb, + 0x77, 0x3d, 0x0f, 0x32, 0x5a, 0xde, 0x75, 0x58, 0x14, 0xe8, 0x05, 0x28, 0x74, 0x93, 0x84, 0x45, + 0x0c, 0xcf, 0x79, 0xe3, 0xd3, 0x86, 0x31, 0x7b, 0x5d, 0x74, 0x02, 0x2d, 0x2b, 0x66, 0xb9, 0x0a, + 0x96, 0x85, 0xe5, 0xd7, 0xfb, 0x96, 0x46, 0xb0, 0xac, 0x2c, 0x57, 0x45, 0xc1, 0xa2, 0x8b, 0xfc, + 0x37, 0x64, 0xe5, 0xd5, 0x8d, 0x6f, 0xf6, 0xa3, 0xb3, 0x18, 0xa9, 0xe1, 0xde, 0x8f, 0x9a, 0xd0, + 0x77, 0x5d, 0x38, 0xbc, 0x0b, 0xa2, 0x95, 0xa6, 0x80, 0x5c, 0xfe, 0x96, 0xb5, 0x6f, 0x41, 0x2e, + 0x0f, 0x8b, 0xae, 0xa1, 0xcb, 0x0d, 0xb2, 0x33, 0x49, 0x6d, 0xef, 0x3a, 
0xec, 0x2c, 0x0c, 0xce, + 0xa9, 0x9a, 0x0e, 0x50, 0x99, 0xfc, 0x3f, 0x89, 0x3f, 0x25, 0x90, 0xcb, 0xf3, 0xa3, 0x17, 0xd0, + 0x5d, 0x07, 0x24, 0x5c, 0x15, 0x12, 0x17, 0xf7, 0xc6, 0x74, 0xf5, 0x3d, 0xe7, 0x98, 0x11, 0x4d, + 0xb7, 0x38, 0x6f, 0x38, 0x7f, 0x0d, 0x4a, 0x0d, 0x46, 0x03, 0x68, 0xdf, 0x91, 0x2d, 0x5f, 0x66, + 0x19, 0xb3, 0x47, 0xf4, 0x15, 0x74, 0x44, 0x84, 0x07, 0xfb, 0x11, 0xee, 0x4e, 0x27, 0x98, 0x2f, + 0x0f, 0xbe, 0x91, 0x34, 0x1b, 0x64, 0xdb, 0x0f, 0x52, 0x31, 0xdf, 0xfb, 0x50, 0xd5, 0x5c, 0xe8, + 0xef, 0x5c, 0x19, 0x42, 0x70, 0xc8, 0xae, 0x2c, 0x17, 0xe6, 0xcf, 0xe8, 0xcb, 0x32, 0xce, 0x03, + 0x9e, 0x45, 0xe3, 0x27, 0x53, 0x8e, 0x54, 0x06, 0xf9, 0x87, 0x04, 0xfd, 0x9d, 0x25, 0xba, 0x57, + 0x76, 0x0c, 0x1d, 0xfe, 0x75, 0xc8, 0x07, 0x7e, 0xf0, 0xbb, 0x80, 0x05, 0x15, 0x7d, 0x0e, 0x1d, + 0xfe, 0x0d, 0x38, 0x6b, 0xbf, 0x7b, 0xfb, 0xb1, 0xa0, 0x68, 0x7f, 0xb5, 0xa1, 0xd7, 0xd8, 0x41, + 0xb4, 0xa8, 0xef, 0xed, 0x32, 0xf4, 0xb3, 0x8c, 0x0f, 0x74, 0x3c, 0xfe, 0xec, 0x9d, 0x7b, 0x5b, + 0x56, 0x13, 0x46, 0xaf, 0x76, 0x98, 0x97, 0xe8, 0x25, 0x80, 0x10, 0xa4, 0x3e, 0xfd, 0x4f, 0xc1, + 0xcb, 0xbc, 0x9f, 0xb1, 0xd1, 0x08, 0x4e, 0x77, 0x86, 0xf1, 0x78, 0x44, 0x6d, 0x1e, 0xd1, 0x49, + 0xc3, 0x88, 0xdd, 0x92, 0xf6, 0xb7, 0x54, 0x9d, 0x47, 0xd8, 0x2b, 0xf0, 0xe8, 0x47, 0xeb, 0x07, + 0x6b, 0xf1, 0x93, 0x35, 0x68, 0xa1, 0xa7, 0x80, 0x1c, 0x5b, 0xc7, 0x8e, 0xe9, 0xb9, 0xa6, 0xe5, + 0x2c, 0xb0, 0xe7, 0xd8, 0xe6, 0x64, 0x20, 0xa1, 0x0f, 0xe1, 0x83, 0x99, 0x65, 0x98, 0x3f, 0x9b, + 0x86, 0xe7, 0xcc, 0x67, 0x13, 0xd3, 0x11, 0x2f, 0x0e, 0x58, 0x03, 0xd6, 0x5f, 0xbd, 0x32, 0x8d, + 0x46, 0x43, 0x1b, 0x3d, 0x81, 0x93, 0x1c, 0xd0, 0x31, 0xd6, 0x7f, 0x11, 0xf0, 0x21, 0x83, 0x0d, + 0xdd, 0xd5, 0x3d, 0xf6, 0xc7, 0x31, 0x5d, 0x01, 0x77, 0x98, 0x0a, 0x87, 0x67, 0xae, 0x89, 0x75, + 0xb7, 0x50, 0xe9, 0xa2, 0x13, 0xe8, 0x2d, 0x6c, 0x77, 0xb6, 0xb0, 0xf4, 0xb9, 0x80, 0x1e, 0xa1, + 0x53, 0x18, 0xd8, 0x26, 0xf6, 0xb0, 0x69, 0xcf, 0x67, 0x13, 0x5d, 0xa0, 0x8f, 0x6f, 0xbb, 0xfc, + 0xdf, 0xe1, 
0xf5, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x22, 0xd0, 0x55, 0x98, 0x07, 0x00, + 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/struct.proto b/executor/proto/tensorflow/core/protobuf/struct.proto new file mode 100644 index 0000000000..ecf48776c5 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/struct.proto @@ -0,0 +1,134 @@ +syntax = "proto3"; + +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +package tensorflow; + +// `StructuredValue` represents a dynamically typed value representing various +// data structures that are inspired by Python data structures typically used in +// TensorFlow functions as inputs and outputs. +// +// For example when saving a Layer there may be a `training` argument. If the +// user passes a boolean True/False, that switches between two concrete +// TensorFlow functions. In order to switch between them in the same way after +// loading the SavedModel, we need to represent "True" and "False". +// +// A more advanced example might be a function which takes a list of +// dictionaries mapping from strings to Tensors. In order to map from +// user-specified arguments `[{"a": tf.constant(1.)}, {"q": tf.constant(3.)}]` +// after load to the right saved TensorFlow function, we need to represent the +// nested structure and the strings, recording that we have a trace for anything +// matching `[{"a": tf.TensorSpec(None, tf.float32)}, {"q": tf.TensorSpec([], +// tf.float64)}]` as an example. +// +// Likewise functions may return nested structures of Tensors, for example +// returning a dictionary mapping from strings to Tensors. In order for the +// loaded function to return the same structure we need to serialize it. +// +// This is an ergonomic aid for working with loaded SavedModels, not a promise +// to serialize all possible function signatures. For example we do not expect +// to pickle generic Python objects, and ideally we'd stay language-agnostic. 
+message StructuredValue { + // The kind of value. + oneof kind { + // Represents None. + NoneValue none_value = 1; + + // Represents a double-precision floating-point value (a Python `float`). + double float64_value = 11; + // Represents a signed integer value, limited to 64 bits. + // Larger values from Python's arbitrary-precision integers are unsupported. + sint64 int64_value = 12; + // Represents a string of Unicode characters stored in a Python `str`. + // In Python 3, this is exactly what type `str` is. + // In Python 2, this is the UTF-8 encoding of the characters. + // For strings with ASCII characters only (as often used in TensorFlow code) + // there is effectively no difference between the language versions. + // The obsolescent `unicode` type of Python 2 is not supported here. + string string_value = 13; + // Represents a boolean value. + bool bool_value = 14; + + // Represents a TensorShape. + tensorflow.TensorShapeProto tensor_shape_value = 31; + // Represents an enum value for dtype. + tensorflow.DataType tensor_dtype_value = 32; + // Represents a value for tf.TensorSpec. + TensorSpecProto tensor_spec_value = 33; + // Represents a value for tf.TypeSpec. + TypeSpecProto type_spec_value = 34; + + // Represents a list of `Value`. + ListValue list_value = 51; + // Represents a tuple of `Value`. + TupleValue tuple_value = 52; + // Represents a dict `Value`. + DictValue dict_value = 53; + // Represents Python's namedtuple. + NamedTupleValue named_tuple_value = 54; + } +} + +// Represents None. +message NoneValue {} + +// Represents a Python list. +message ListValue { + repeated StructuredValue values = 1; +} + +// Represents a Python tuple. +message TupleValue { + repeated StructuredValue values = 1; +} + +// Represents a Python dict keyed by `str`. +// The comment on Unicode from Value.string_value applies analogously. +message DictValue { + map fields = 1; +} + +// Represents a (key, value) pair. 
+message PairValue { + string key = 1; + StructuredValue value = 2; +} + +// Represents Python's namedtuple. +message NamedTupleValue { + string name = 1; + repeated PairValue values = 2; +} + +// A protobuf to tf.TensorSpec. +message TensorSpecProto { + string name = 1; + tensorflow.TensorShapeProto shape = 2; + tensorflow.DataType dtype = 3; +} + +// Represents a tf.TypeSpec +message TypeSpecProto { + enum TypeSpecClass { + UNKNOWN = 0; + SPARSE_TENSOR_SPEC = 1; // tf.SparseTensorSpec + INDEXED_SLICES_SPEC = 2; // tf.IndexedSlicesSpec + RAGGED_TENSOR_SPEC = 3; // tf.RaggedTensorSpec + TENSOR_ARRAY_SPEC = 4; // tf.TensorArraySpec + DATA_DATASET_SPEC = 5; // tf.data.DatasetSpec + DATA_ITERATOR_SPEC = 6; // IteratorSpec from data/ops/iterator_ops.py + OPTIONAL_SPEC = 7; // tf.OptionalSpec + PER_REPLICA_SPEC = 8; // PerReplicaSpec from distribute/values.py + } + TypeSpecClass type_spec_class = 1; + + // The value returned by TypeSpec._serialize(). + StructuredValue type_state = 2; + + // This is currently redundant with the type_spec_class enum, and is only + // used for error reporting. In particular, if you use an older binary to + // load a newer model, and the model uses a TypeSpecClass that the older + // binary doesn't support, then this lets us display a useful error message. + string type_spec_class_name = 3; +} diff --git a/executor/proto/tensorflow/core/protobuf/tensor_bundle.pb.go b/executor/proto/tensorflow/core/protobuf/tensor_bundle.pb.go new file mode 100644 index 0000000000..32c6e194e2 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/tensor_bundle.pb.go @@ -0,0 +1,256 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/tensor_bundle.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// An enum indicating the endianness of the platform that produced this +// bundle. A bundle can only be read by a platform with matching endianness. +// Defaults to LITTLE, as most modern platforms are little-endian. +// +// Affects the binary tensor data bytes only, not the metadata in protobufs. +type BundleHeaderProto_Endianness int32 + +const ( + BundleHeaderProto_LITTLE BundleHeaderProto_Endianness = 0 + BundleHeaderProto_BIG BundleHeaderProto_Endianness = 1 +) + +var BundleHeaderProto_Endianness_name = map[int32]string{ + 0: "LITTLE", + 1: "BIG", +} + +var BundleHeaderProto_Endianness_value = map[string]int32{ + "LITTLE": 0, + "BIG": 1, +} + +func (x BundleHeaderProto_Endianness) String() string { + return proto.EnumName(BundleHeaderProto_Endianness_name, int32(x)) +} + +func (BundleHeaderProto_Endianness) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9ab648e6509929dc, []int{0, 0} +} + +// Special header that is associated with a bundle. +// +// TODO(zongheng,zhifengc): maybe in the future, we can add information about +// which binary produced this checkpoint, timestamp, etc. Sometime, these can be +// valuable debugging information. And if needed, these can be used as defensive +// information ensuring reader (binary version) of the checkpoint and the writer +// (binary version) must match within certain range, etc. +type BundleHeaderProto struct { + // Number of data files in the bundle. 
+ NumShards int32 `protobuf:"varint,1,opt,name=num_shards,json=numShards,proto3" json:"num_shards,omitempty"` + Endianness BundleHeaderProto_Endianness `protobuf:"varint,2,opt,name=endianness,proto3,enum=tensorflow.BundleHeaderProto_Endianness" json:"endianness,omitempty"` + // Versioning of the tensor bundle format. + Version *framework.VersionDef `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BundleHeaderProto) Reset() { *m = BundleHeaderProto{} } +func (m *BundleHeaderProto) String() string { return proto.CompactTextString(m) } +func (*BundleHeaderProto) ProtoMessage() {} +func (*BundleHeaderProto) Descriptor() ([]byte, []int) { + return fileDescriptor_9ab648e6509929dc, []int{0} +} + +func (m *BundleHeaderProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BundleHeaderProto.Unmarshal(m, b) +} +func (m *BundleHeaderProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BundleHeaderProto.Marshal(b, m, deterministic) +} +func (m *BundleHeaderProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_BundleHeaderProto.Merge(m, src) +} +func (m *BundleHeaderProto) XXX_Size() int { + return xxx_messageInfo_BundleHeaderProto.Size(m) +} +func (m *BundleHeaderProto) XXX_DiscardUnknown() { + xxx_messageInfo_BundleHeaderProto.DiscardUnknown(m) +} + +var xxx_messageInfo_BundleHeaderProto proto.InternalMessageInfo + +func (m *BundleHeaderProto) GetNumShards() int32 { + if m != nil { + return m.NumShards + } + return 0 +} + +func (m *BundleHeaderProto) GetEndianness() BundleHeaderProto_Endianness { + if m != nil { + return m.Endianness + } + return BundleHeaderProto_LITTLE +} + +func (m *BundleHeaderProto) GetVersion() *framework.VersionDef { + if m != nil { + return m.Version + } + return nil +} + +// Describes the metadata related to a checkpointed tensor. 
+type BundleEntryProto struct { + // The tensor dtype and shape. + Dtype framework.DataType `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"` + Shape *framework.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"` + // The binary content of the tensor lies in: + // File "shard_id": bytes [offset, offset + size). + ShardId int32 `protobuf:"varint,3,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + Size int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` + // The CRC32C checksum of the tensor bytes. + Crc32C uint32 `protobuf:"fixed32,6,opt,name=crc32c,proto3" json:"crc32c,omitempty"` + // Iff present, this entry represents a partitioned tensor. The previous + // fields are interpreted as follows: + // + // "dtype", "shape": describe the full tensor. + // "shard_id", "offset", "size", "crc32c": all IGNORED. + // These information for each slice can be looked up in their own + // BundleEntryProto, keyed by each "slice_name". 
+ Slices []*framework.TensorSliceProto `protobuf:"bytes,7,rep,name=slices,proto3" json:"slices,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BundleEntryProto) Reset() { *m = BundleEntryProto{} } +func (m *BundleEntryProto) String() string { return proto.CompactTextString(m) } +func (*BundleEntryProto) ProtoMessage() {} +func (*BundleEntryProto) Descriptor() ([]byte, []int) { + return fileDescriptor_9ab648e6509929dc, []int{1} +} + +func (m *BundleEntryProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BundleEntryProto.Unmarshal(m, b) +} +func (m *BundleEntryProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BundleEntryProto.Marshal(b, m, deterministic) +} +func (m *BundleEntryProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_BundleEntryProto.Merge(m, src) +} +func (m *BundleEntryProto) XXX_Size() int { + return xxx_messageInfo_BundleEntryProto.Size(m) +} +func (m *BundleEntryProto) XXX_DiscardUnknown() { + xxx_messageInfo_BundleEntryProto.DiscardUnknown(m) +} + +var xxx_messageInfo_BundleEntryProto proto.InternalMessageInfo + +func (m *BundleEntryProto) GetDtype() framework.DataType { + if m != nil { + return m.Dtype + } + return framework.DataType_DT_INVALID +} + +func (m *BundleEntryProto) GetShape() *framework.TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +func (m *BundleEntryProto) GetShardId() int32 { + if m != nil { + return m.ShardId + } + return 0 +} + +func (m *BundleEntryProto) GetOffset() int64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BundleEntryProto) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + +func (m *BundleEntryProto) GetCrc32C() uint32 { + if m != nil { + return m.Crc32C + } + return 0 +} + +func (m *BundleEntryProto) GetSlices() []*framework.TensorSliceProto { + if m != nil { + return m.Slices + } + return nil +} + 
+func init() { + proto.RegisterEnum("tensorflow.BundleHeaderProto_Endianness", BundleHeaderProto_Endianness_name, BundleHeaderProto_Endianness_value) + proto.RegisterType((*BundleHeaderProto)(nil), "tensorflow.BundleHeaderProto") + proto.RegisterType((*BundleEntryProto)(nil), "tensorflow.BundleEntryProto") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/tensor_bundle.proto", fileDescriptor_9ab648e6509929dc) +} + +var fileDescriptor_9ab648e6509929dc = []byte{ + // 428 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xdf, 0x6b, 0xd4, 0x40, + 0x10, 0xc7, 0xdd, 0xa6, 0x49, 0x74, 0x0a, 0xe5, 0x5c, 0xa5, 0x44, 0x51, 0x88, 0x07, 0x42, 0x10, + 0x49, 0x24, 0xf5, 0xd1, 0xa7, 0xd0, 0xc3, 0x1e, 0xf4, 0xa1, 0x6c, 0x83, 0x0f, 0xbe, 0x94, 0xfc, + 0xd8, 0x5c, 0x83, 0xc9, 0x6e, 0xd8, 0x4d, 0x2c, 0xe7, 0x3f, 0xe0, 0xdf, 0xe7, 0x7f, 0xe3, 0xa3, + 0x64, 0x36, 0xe7, 0x45, 0x6a, 0xc5, 0xb7, 0x9d, 0x99, 0xcf, 0x77, 0x76, 0xbe, 0x93, 0x0d, 0xbc, + 0xed, 0xb9, 0xd0, 0x52, 0x55, 0x8d, 0xbc, 0x8d, 0x0a, 0xa9, 0x78, 0xd4, 0x29, 0xd9, 0xcb, 0x7c, + 0xa8, 0x22, 0x53, 0xb8, 0xce, 0x07, 0x51, 0x36, 0x3c, 0xc4, 0x34, 0x85, 0x3d, 0xfd, 0xfc, 0x8e, + 0xb2, 0x52, 0x59, 0xcb, 0x6f, 0xa5, 0xfa, 0xb2, 0x93, 0xea, 0x9b, 0xac, 0x9b, 0x94, 0xff, 0x43, + 0x37, 0x75, 0xb1, 0xa3, 0x5f, 0xff, 0x83, 0xde, 0x76, 0x5c, 0x4f, 0x58, 0x70, 0x3f, 0xf6, 0x95, + 0x2b, 0x5d, 0x4b, 0x31, 0x91, 0xcb, 0x1f, 0x04, 0x1e, 0x27, 0xe8, 0xe4, 0x9c, 0x67, 0x25, 0x57, + 0x97, 0x68, 0xe7, 0x25, 0x80, 0x18, 0xda, 0x71, 0x4e, 0x55, 0x6a, 0x8f, 0xf8, 0x24, 0xb0, 0xd9, + 0x23, 0x31, 0xb4, 0x57, 0x98, 0xa0, 0xe7, 0x00, 0x5c, 0x94, 0x75, 0x26, 0x04, 0xd7, 0xda, 0x3b, + 0xf0, 0x49, 0x70, 0x1c, 0x07, 0xe1, 0xfe, 0xce, 0xf0, 0x4e, 0xc7, 0x70, 0xf5, 0x9b, 0x67, 0x33, + 0x2d, 0x7d, 0x07, 0xee, 0x34, 0x90, 0x67, 0xf9, 0x24, 0x38, 0x8a, 0x4f, 0xe6, 0x6d, 0x3e, 0x99, + 0xd2, 0x19, 0xaf, 0xd8, 0x0e, 0x5b, 0xbe, 0x02, 0xd8, 0xf7, 0xa2, 0x00, 0xce, 0xc5, 0x3a, 0x4d, 
+ 0x2f, 0x56, 0x8b, 0x07, 0xd4, 0x05, 0x2b, 0x59, 0x7f, 0x5c, 0x90, 0xe5, 0xf7, 0x03, 0x58, 0x98, + 0x09, 0x56, 0xa2, 0x57, 0x5b, 0x63, 0xe9, 0x0d, 0xd8, 0xe5, 0xb8, 0x22, 0x74, 0x73, 0x1c, 0x3f, + 0x9d, 0xdf, 0x73, 0x96, 0xf5, 0x59, 0xba, 0xed, 0x38, 0x33, 0x08, 0x8d, 0xc1, 0xc6, 0x4f, 0x84, + 0xd6, 0x8e, 0xe2, 0x17, 0x73, 0x36, 0xc5, 0xe3, 0xd5, 0x58, 0xc6, 0xc6, 0xcc, 0xa0, 0xf4, 0x19, + 0x3c, 0xc4, 0x75, 0x5d, 0xd7, 0x25, 0x5a, 0xb1, 0x99, 0x8b, 0xf1, 0xba, 0xa4, 0x27, 0xe0, 0xc8, + 0xaa, 0xd2, 0xbc, 0xf7, 0x0e, 0x7d, 0x12, 0x58, 0x6c, 0x8a, 0x28, 0x85, 0x43, 0x5d, 0x7f, 0xe3, + 0x9e, 0x8d, 0x59, 0x3c, 0x8f, 0x6c, 0xa1, 0x8a, 0xd3, 0xb8, 0xf0, 0x1c, 0x9f, 0x04, 0x2e, 0x9b, + 0x22, 0xfa, 0x1e, 0x1c, 0x7c, 0x07, 0xda, 0x73, 0x7d, 0xeb, 0x9e, 0x99, 0xc6, 0xba, 0x99, 0x69, + 0x62, 0x93, 0x06, 0x9e, 0x48, 0xb5, 0x99, 0xa3, 0x43, 0x5f, 0x37, 0x09, 0x35, 0x02, 0xb3, 0x23, + 0x54, 0xe8, 0x4b, 0xf2, 0xf9, 0xc3, 0xa6, 0xee, 0x6f, 0x86, 0x3c, 0x2c, 0x64, 0x1b, 0xcd, 0xde, + 0xcf, 0xdf, 0x8f, 0x1b, 0xf9, 0xe7, 0x5f, 0xf1, 0x93, 0x90, 0xdc, 0xc1, 0xe0, 0xf4, 0x57, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x92, 0x82, 0xc8, 0x56, 0x3b, 0x03, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/tensor_bundle.proto b/executor/proto/tensorflow/core/protobuf/tensor_bundle.proto new file mode 100644 index 0000000000..681c01bbbd --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/tensor_bundle.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "TensorBundleProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.util"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/tensor_slice.proto"; +import "tensorflow/core/framework/types.proto"; +import "tensorflow/core/framework/versions.proto"; + +// Protos used in the tensor bundle module 
(tf/core/util/tensor_bundle/). + +// Special header that is associated with a bundle. +// +// TODO(zongheng,zhifengc): maybe in the future, we can add information about +// which binary produced this checkpoint, timestamp, etc. Sometime, these can be +// valuable debugging information. And if needed, these can be used as defensive +// information ensuring reader (binary version) of the checkpoint and the writer +// (binary version) must match within certain range, etc. +message BundleHeaderProto { + // Number of data files in the bundle. + int32 num_shards = 1; + + // An enum indicating the endianness of the platform that produced this + // bundle. A bundle can only be read by a platform with matching endianness. + // Defaults to LITTLE, as most modern platforms are little-endian. + // + // Affects the binary tensor data bytes only, not the metadata in protobufs. + enum Endianness { + LITTLE = 0; + BIG = 1; + } + Endianness endianness = 2; + + // Versioning of the tensor bundle format. + VersionDef version = 3; +} + +// Describes the metadata related to a checkpointed tensor. +message BundleEntryProto { + // The tensor dtype and shape. + DataType dtype = 1; + TensorShapeProto shape = 2; + // The binary content of the tensor lies in: + // File "shard_id": bytes [offset, offset + size). + int32 shard_id = 3; + int64 offset = 4; + int64 size = 5; + + // The CRC32C checksum of the tensor bytes. + fixed32 crc32c = 6; + + // Iff present, this entry represents a partitioned tensor. The previous + // fields are interpreted as follows: + // + // "dtype", "shape": describe the full tensor. + // "shard_id", "offset", "size", "crc32c": all IGNORED. + // These information for each slice can be looked up in their own + // BundleEntryProto, keyed by each "slice_name". 
+ repeated TensorSliceProto slices = 7; +} diff --git a/executor/proto/tensorflow/core/protobuf/tensorflow_server.pb.go b/executor/proto/tensorflow/core/protobuf/tensorflow_server.pb.go new file mode 100644 index 0000000000..726f587385 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/tensorflow_server.pb.go @@ -0,0 +1,137 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/tensorflow_server.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Defines the configuration of a single TensorFlow server. +type ServerDef struct { + // The cluster of which this server is a member. + Cluster *ClusterDef `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + // The name of the job of which this server is a member. + // + // NOTE(mrry): The `cluster` field must contain a `JobDef` with a `name` field + // that matches this name. + JobName string `protobuf:"bytes,2,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` + // The task index of this server in its job. + // + // NOTE: The `cluster` field must contain a `JobDef` with a matching `name` + // and a mapping in its `tasks` field for this index. + TaskIndex int32 `protobuf:"varint,3,opt,name=task_index,json=taskIndex,proto3" json:"task_index,omitempty"` + // The default configuration for sessions that run on this server. 
+ DefaultSessionConfig *ConfigProto `protobuf:"bytes,4,opt,name=default_session_config,json=defaultSessionConfig,proto3" json:"default_session_config,omitempty"` + // The protocol to be used by this server. + // + // Acceptable values include: "grpc", "grpc+verbs". + Protocol string `protobuf:"bytes,5,opt,name=protocol,proto3" json:"protocol,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerDef) Reset() { *m = ServerDef{} } +func (m *ServerDef) String() string { return proto.CompactTextString(m) } +func (*ServerDef) ProtoMessage() {} +func (*ServerDef) Descriptor() ([]byte, []int) { + return fileDescriptor_7f0f8cbd85b669e4, []int{0} +} + +func (m *ServerDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerDef.Unmarshal(m, b) +} +func (m *ServerDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerDef.Marshal(b, m, deterministic) +} +func (m *ServerDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerDef.Merge(m, src) +} +func (m *ServerDef) XXX_Size() int { + return xxx_messageInfo_ServerDef.Size(m) +} +func (m *ServerDef) XXX_DiscardUnknown() { + xxx_messageInfo_ServerDef.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerDef proto.InternalMessageInfo + +func (m *ServerDef) GetCluster() *ClusterDef { + if m != nil { + return m.Cluster + } + return nil +} + +func (m *ServerDef) GetJobName() string { + if m != nil { + return m.JobName + } + return "" +} + +func (m *ServerDef) GetTaskIndex() int32 { + if m != nil { + return m.TaskIndex + } + return 0 +} + +func (m *ServerDef) GetDefaultSessionConfig() *ConfigProto { + if m != nil { + return m.DefaultSessionConfig + } + return nil +} + +func (m *ServerDef) GetProtocol() string { + if m != nil { + return m.Protocol + } + return "" +} + +func init() { + proto.RegisterType((*ServerDef)(nil), "tensorflow.ServerDef") +} + +func init() { + 
proto.RegisterFile("tensorflow/core/protobuf/tensorflow_server.proto", fileDescriptor_7f0f8cbd85b669e4) +} + +var fileDescriptor_7f0f8cbd85b669e4 = []byte{ + // 290 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x3f, 0x4f, 0xf3, 0x30, + 0x10, 0xc6, 0xe5, 0xf7, 0xa5, 0xb4, 0x35, 0x4c, 0x16, 0x2a, 0x21, 0x12, 0x52, 0x84, 0x04, 0xca, + 0x94, 0x54, 0xb0, 0x32, 0x95, 0x2e, 0x0c, 0xa0, 0x2a, 0xdd, 0x58, 0xa2, 0xfc, 0x39, 0x07, 0x97, + 0xc4, 0x87, 0x6c, 0x07, 0xf8, 0xbc, 0x7c, 0x0a, 0x46, 0x14, 0xbb, 0x25, 0xed, 0x90, 0x2d, 0x77, + 0xbf, 0x7b, 0x9e, 0xbb, 0x27, 0xa6, 0x73, 0x03, 0x52, 0xa3, 0xe2, 0x35, 0x7e, 0xc6, 0x05, 0x2a, + 0x88, 0xdf, 0x15, 0x1a, 0xcc, 0x5b, 0x1e, 0xf7, 0x20, 0xd5, 0xa0, 0x3e, 0x40, 0x45, 0x16, 0x31, + 0xda, 0x03, 0xff, 0x7a, 0x50, 0x5d, 0xa0, 0xe4, 0xa2, 0x72, 0x12, 0xff, 0x66, 0x78, 0xac, 0x6e, + 0xb5, 0xd9, 0x59, 0x5f, 0x7d, 0x13, 0x3a, 0x5d, 0xdb, 0x5d, 0x4b, 0xe0, 0x6c, 0x4e, 0xc7, 0x5b, + 0xec, 0x91, 0x80, 0x84, 0x27, 0xb7, 0xb3, 0xa8, 0xf7, 0x89, 0x1e, 0x1c, 0x5a, 0x02, 0x4f, 0x76, + 0x63, 0xec, 0x82, 0x4e, 0x36, 0x98, 0xa7, 0x32, 0x6b, 0xc0, 0xfb, 0x17, 0x90, 0x70, 0x9a, 0x8c, + 0x37, 0x98, 0x3f, 0x67, 0x0d, 0xb0, 0x4b, 0x4a, 0x4d, 0xa6, 0xdf, 0x52, 0x21, 0x4b, 0xf8, 0xf2, + 0xfe, 0x07, 0x24, 0x1c, 0x25, 0xd3, 0xae, 0xf3, 0xd8, 0x35, 0xd8, 0x13, 0x9d, 0x95, 0xc0, 0xb3, + 0xb6, 0x36, 0xa9, 0x06, 0xad, 0x05, 0xca, 0xd4, 0x25, 0xf0, 0x8e, 0xec, 0xea, 0xf3, 0x83, 0xd5, + 0x96, 0xac, 0xba, 0x93, 0x93, 0xb3, 0xad, 0x6c, 0xed, 0x54, 0x0e, 0x31, 0x9f, 0x4e, 0x6c, 0xa2, + 0x02, 0x6b, 0x6f, 0x64, 0x0f, 0xf9, 0xab, 0x17, 0x0d, 0xf5, 0x51, 0x55, 0xfb, 0x7e, 0xa5, 0xd0, + 0x46, 0xb5, 0xd2, 0x88, 0x06, 0x16, 0xa7, 0x2e, 0xbf, 0x35, 0xd7, 0x2b, 0xf2, 0x72, 0x5f, 0x09, + 0xf3, 0xda, 0xe6, 0x51, 0x81, 0xcd, 0xde, 0x8b, 0x0c, 0x7c, 0x56, 0x78, 0xf8, 0x7b, 0x7f, 0x08, + 0xc9, 0x8f, 0x6d, 0x71, 0xf7, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x8f, 0xa7, 0x8e, 0xe0, 0xe9, 0x01, + 0x00, 0x00, +} diff --git 
a/executor/proto/tensorflow/core/protobuf/tensorflow_server.proto b/executor/proto/tensorflow/core/protobuf/tensorflow_server.proto new file mode 100644 index 0000000000..2bf48d50e1 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/tensorflow_server.proto @@ -0,0 +1,51 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +import "tensorflow/core/protobuf/config.proto"; +import "tensorflow/core/protobuf/cluster.proto"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "ServerProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.distruntime"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +// Defines the configuration of a single TensorFlow server. +message ServerDef { + // The cluster of which this server is a member. + ClusterDef cluster = 1; + + // The name of the job of which this server is a member. + // + // NOTE(mrry): The `cluster` field must contain a `JobDef` with a `name` field + // that matches this name. + string job_name = 2; + + // The task index of this server in its job. + // + // NOTE: The `cluster` field must contain a `JobDef` with a matching `name` + // and a mapping in its `tasks` field for this index. 
+ int32 task_index = 3; + + // The default configuration for sessions that run on this server. + ConfigProto default_session_config = 4; + + // The protocol to be used by this server. + // + // Acceptable values include: "grpc", "grpc+verbs". + string protocol = 5; +} diff --git a/executor/proto/tensorflow/core/protobuf/tpu/compilation_result.proto b/executor/proto/tensorflow/core/protobuf/tpu/compilation_result.proto new file mode 100644 index 0000000000..88585a5bd1 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/tpu/compilation_result.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; +package tensorflow.tpu; + +import "tensorflow/compiler/xla/service/hlo.proto"; +import "tensorflow/core/lib/core/error_codes.proto"; + +// Describes the result of a TPU compilation. +message CompilationResultProto { + // The error message, if any, returned during compilation. + error.Code status_code = 1; + string status_error_message = 2; + + // HLO proto. + repeated xla.HloProto hlo_protos = 3; +} diff --git a/executor/proto/tensorflow/core/protobuf/tpu/dynamic_padding.proto b/executor/proto/tensorflow/core/protobuf/tpu/dynamic_padding.proto new file mode 100644 index 0000000000..c9ebf18116 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/tpu/dynamic_padding.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +package tensorflow.tpu; + +// A mapping between the dynamic shape dimension of an input and the arg that +// represents the real shape. +message PaddingMap { + // Input arg index with dynamic shapes. + int32 arg_index = 1; + + // The dynamic shape dimension index. + int32 shape_index = 2; + + // The arg index that dynamic dimension maps to, which represents the value + // of the real shape. 
+ int32 padding_arg_index = 3; +} diff --git a/executor/proto/tensorflow/core/protobuf/tpu/optimization_parameters.proto b/executor/proto/tensorflow/core/protobuf/tpu/optimization_parameters.proto new file mode 100644 index 0000000000..f52f7bf7f6 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/tpu/optimization_parameters.proto @@ -0,0 +1,339 @@ +syntax = "proto3"; + +package tensorflow.tpu; + +import "google/protobuf/wrappers.proto"; + +message ClippingLimits { + google.protobuf.FloatValue lower = 1; // -inf if not set + google.protobuf.FloatValue upper = 2; // +inf if not set +} + +// Dynamic learning rate specification in the TPUEmbeddingConfiguration. The +// actual learning rates are provided as a scalar input list to the +// SendTPUEmbeddingGradients Op indexed by their tag specified through the +// following proto. +message DynamicLearningRate { + // For tables where learning rates are dynamically computed and communicated + // to the TPU embedding program, a tag must be specified for the learning + // rate. + // + // The tag must be a non-negative integer. The total number of unique tags + // must be less than or equal to the number of tables in the TPU embedding + // configuration (a table does not specify any tag if it uses a constant + // learning rate, and specifies exactly one tag if it uses dynamic learning + // rates). + // + // All tags in the range [0, number_of_unique_tags) must be present in the TPU + // embedding configuration, i.e. a tag cannot be skipped if a different tag + // numerically greater than it is used in the configuration. + // + // If multiple tables specify the same tag, they *MUST* have + // the same dynamic learning rate, for example, their dynamic learning rate + // could be computed by the same TensorFlow sub-graph. The partitioning of the + // embedding layer would be more optimal if the number_of_unique_tags is as + // *LOW* as possible, i.e., if many tables share the same tag. 
+ // + // The learning_rate input of the SendTPUEmbeddingGradients op is used to + // communicate dynamic learning rates to the TPU embedding program. + // The learning_rate input is a list of scalars where the size of the list is + // equal to the number of unique tags. The learning rate associated with a + // particular tag is specified by populating its corresponding index in the + // list of learning_rate scalars. + int32 tag = 1; +} + +// Source of learning rate to use. +message LearningRate { + oneof learning_rate { + float constant = 1; + DynamicLearningRate dynamic = 2; + } +} + +// Each optimizer's parameter proto has a link to its documentation and CPU +// implementation (if available) for user reference. + +// https://www.tensorflow.org/api_docs/python/tf/train/AdagradOptimizer +// https://github.com/tensorflow/tensorflow/blob/c19e29306ce1777456b2dbb3a14f511edf7883a8/tensorflow/core/kernels/training_ops.cc#L151 +message AdagradParameters { + float initial_accumulator = 1; +} + +// Algorithm in http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf. +message BoundedAdagradParameters { + // Whether to use the updated or the old value of the accumulator when + // computing the effective learning rate. When update_accumulator_first is set + // to True, the updated value of the accumulator is used. + bool update_accumulator_first = 1; + // The max_var_update value to use. Set value to 0 (default) to disable using + // max_var_update to clip the gradient. + float max_var_update = 2; + // The maximum value of the accumulator. Set max_accumulator to 0 (default) + // to disable using max_accumulator to clip the accumulator. 
+ float max_accumulator = 3; +} + +// https://www.tensorflow.org/api_docs/python/tf/train/GradientDescentOptimizer +// https://github.com/tensorflow/tensorflow/blob/c19e29306ce1777456b2dbb3a14f511edf7883a8/tensorflow/core/kernels/training_ops.cc#L423 +message StochasticGradientDescentParameters {} + +// https://www.tensorflow.org/api_docs/python/tf/train/FtrlOptimizer +// https://github.com/tensorflow/tensorflow/blob/c19e29306ce1777456b2dbb3a14f511edf7883a8/tensorflow/core/kernels/training_ops.cc#L192 +message FtrlParameters { + float l1 = 1; + float l2 = 2; + float lr_power = 3; + float initial_accum = 4; + float initial_linear = 5; +} + +// The Adam optimizer does not implement hyper-parameter update; use the dynamic +// learning rate feature instead, setting the learning rate to: +// user learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) +// Here, t is the current timestep. +// +// https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer +// https://github.com/tensorflow/tensorflow/blob/ab51450c817674c8ff08a7ae4f8ac50cdc4bed8b/tensorflow/python/training/adam.py#L54 +// +// Note that the code by default implements the lazy version of Adam +// (https://www.tensorflow.org/api_docs/python/tf/contrib/opt/LazyAdamOptimizer) +// unless the use_non_lazy_adam parameter is set, in which case it implements +// the normal version of Adam that updates all parameters in the embedding +// table, even for entries that are not used in the current minibatch +// (https://www.tensorflow.org/api_docs/python/tf/contrib/opt/AdamOptimizer). If +// use_non_lazy_adam is enabled, gradient accumulation is also required to be +// enabled in order to get correct results; a warning will be printed otherwise +// (which may change to an error in the future). 
If use_sum_inside_sqrt is set, +// the Adam variable update formula will be changed from m / (sqrt(v) + epsilon) +// to m / sqrt(v + epsilon**2); this option improves the performance of TPU +// training and is not expected to harm model quality. +message AdamParameters { + float beta1 = 3; + float beta2 = 4; + float epsilon = 5; + float initial_m = 6; + float initial_v = 7; + bool use_non_lazy_adam = 8; + bool use_sum_inside_sqrt = 10; +} + +// https://www.tensorflow.org/api_docs/python/tf/train/MomentumOptimizer +// https://github.com/tensorflow/tensorflow/blob/c19e29306ce1777456b2dbb3a14f511edf7883a8/tensorflow/core/kernels/training_ops.cc#L271 +message MomentumParameters { + float momentum = 1; + bool use_nesterov = 2; + float initial_accum = 3; +} + +// https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer +// https://github.com/tensorflow/tensorflow/blob/c19e29306ce1777456b2dbb3a14f511edf7883a8/tensorflow/core/kernels/training_ops.cc#L356 +message RmsPropParameters { + float rho = 1; + float momentum = 2; + float epsilon = 3; + float initial_ms = 4; + float initial_mom = 5; +} + +// https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer +// https://github.com/tensorflow/tensorflow/blob/c19e29306ce1777456b2dbb3a14f511edf7883a8/tensorflow/core/kernels/training_ops.cc#L372 +message CenteredRmsPropParameters { + float rho = 1; + float momentum = 2; + float epsilon = 3; + float initial_ms = 4; + float initial_mom = 5; + float initial_mg = 6; +} + +// Variant of algorithm in http://proceedings.mlr.press/v44/shamir15.pdf +message MdlAdagradLightParameters { + float l2 = 1; + float lr_power = 2; + float min_servable_mdl_benefit = 3; + float mdl_mix_in_margin = 4; + float mdl_benefit_rampup_coeff = 5; + float mdl_min_weight = 6; + float benefit_revisit_scale = 7; + float max_event_benefit = 8; + float max_total_benefit = 9; + float mdl_hard_limit = 10; + bool hard_limit_min_benefit = 11; + bool mdl_regularize = 12; + float 
initial_accumulator = 13; + float initial_weight = 14; + float initial_benefit = 15; +} + +// https://www.tensorflow.org/api_docs/python/tf/train/AdadeltaOptimizer +// https://github.com/tensorflow/tensorflow/blob/c19e29306ce1777456b2dbb3a14f511edf7883a8/tensorflow/core/kernels/training_ops.cc#L68 +message AdadeltaParameters { + float rho = 1; + float epsilon = 2; + float initial_accumulator = 3; + float initial_update = 4; +} + +// https://www.tensorflow.org/api_docs/python/tf/train/ProximalAdagradOptimizer +// https://github.com/tensorflow/tensorflow/blob/c19e29306ce1777456b2dbb3a14f511edf7883a8/tensorflow/core/kernels/training_ops.cc#L164 +message ProximalAdagradParameters { + float l1 = 1; + float l2 = 2; + float initial_accumulator = 3; +} + +// The online Yogi optimizer does not implement hyper-parameter update; use the +// dynamic learning rate feature instead, setting the learning rate to: +// user learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) +// Here, t is the current timestep. +// +// https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization.pdf +// plus some extensions based on FTRL. +// +// Note that the code by default implements the lazy version of online Yogi. +message OnlineYogiParameters { + // The L1 regularization parameter (used analogously to the one in FTRL). + float l1 = 1; + + // The L2 regularization parameter (used analogously to the one in FTRL). + float l2 = 2; + + // \beta_2 from Algorithm 2 in the paper. + float beta2 = 3; + + // Initial value of V variable in paper. + float initial_v = 4; + + // Initial value of linear variable in FTRL. + float initial_linear = 5; + + // x -> copysign(1, x) (i.e., return 1 for an input of +0 rather than 0). + message SignActivation {} + + // x -> tanh(x * 10) + message TanhActivation {} + + // Activation to use to replace sign function in v_t update in Algorithm 2 of + // paper. 
+ oneof activation { + SignActivation sign = 6; + TanhActivation tanh = 7; + } +} + +// Status of using gradient accumulation (doing two passes over the input +// gradients: one to accumulate them into a temporary array and another to apply +// them using the actual optimization algorithm). The extra message is to wrap +// the enum for scoping. +message GradientAccumulationStatus { + // if UNSPECIFIED (default), gradient accumulation is ENABLED. + enum Status { + UNSPECIFIED = 0; + ENABLED = 1; + DISABLED = 2; + } +} + +// Configuration proto for hot ID optimization. This is an experimental feature +// that is currently disabled (by default). +message HotIdReplicationConfiguration { + // Whether to enable or disable hot ID optimization. + // If UNSPECIFIED (default), hot ID optimization is DISABLED. + enum Status { + UNSPECIFIED = 0; + ENABLED = 1; + DISABLED = 2; + } + Status status = 1; +} + +message OptimizationParameters { + // Learning rate used for updating the embedding layer parameters. + LearningRate learning_rate = 13; + reserved 1; // Old learning rate tag. + + // Limits to which to clip the weight values after the backward pass; not + // present means no limits are applied. + ClippingLimits clipping_limits = 2; + + // Limits to which to clip the backward pass gradient before using it for + // updates; not present means no limits are applied. + ClippingLimits gradient_clipping_limits = 7; + + // Amount of weight decay to apply; see weight_decay_optimizers.py for + // details. Almost all optimizers are supported with this option (MDL Adagrad + // Light does not work, and SGD does not behave as expected if it is enabled). + // Although there is no check, users who want weight decay will probably also + // want to enable gradient accumulation as well so that the decay will happen + // once per minibatch. 
+ float weight_decay_factor = 16; + + // Status of using gradient accumulation (doing two passes over the input + // gradients: one to accumulate them into a temporary array and another to + // apply them using the actual optimization algorithm). + GradientAccumulationStatus.Status gradient_accumulation_status = 17; + + // Configuration proto for hot ID replication. This is an experimental + // feature that is currently disabled (by default). + HotIdReplicationConfiguration hot_id_replication_configuration = 18; + + // Optimization algorithm parameters; which field is selected determines which + // algorithm to use. + oneof parameters { + AdagradParameters adagrad = 3; + BoundedAdagradParameters bounded_adagrad = 19; + StochasticGradientDescentParameters stochastic_gradient_descent = 4; + FtrlParameters ftrl = 5; + AdamParameters adam = 6; + MomentumParameters momentum = 8; + RmsPropParameters rms_prop = 9; + CenteredRmsPropParameters centered_rms_prop = 10; + MdlAdagradLightParameters mdl_adagrad_light = 11; + AdadeltaParameters adadelta = 12; + ProximalAdagradParameters proximal_adagrad = 14; + OnlineYogiParameters online_yogi = 20; + } + + reserved 15; // Old use_gradient_accumulation. +} + +// Specification of an optimization algorithm's state variables (both the main +// value vector and any extra accumulators, etc.). This proto is only used +// internally by the TPU software and is not exposed directly to the TF model. +message StateVariableSpecification { + // Parameter name for the state variable. + string name = 1; + + // A normal state variable that should be saved and restored in checkpoints + // and used as an input or output to non-debug TensorFlow ops. + message UserDefined { + // For padding embedding rows, this field specifies the initial value to be + // used. Separate initial values need to be specified for the embeddings and + // any extra accumulators. 
The initial values should be specified so as to + // maintain two invariants during model training: + // (1) The embedding vector multiplied by zero returns a vector containing + // all zeros. To maintain this invariant, the embedding values should + // never be NaNs or +-infinity. + // (2) Repeatedly applying the optimizer using a gradient vector of all + // zeros does not cause the embeddings or slot variables to become NaNs + // or +-infinity. + // The padding row is looked up when no embedding IDs are present for a + // feature. The semantics of embedding lookup dictate that the output must + // be zero under this scenario. + double padding_initial_value = 1; + } + + // A state variable that should be filled with a constant and normally hidden + // from users (used for intermediate gradients being accumulated, for + // example). + message FillWithConstant { + double initial_value = 1; + } + + // Usage type of this state variable. + oneof usage { + UserDefined user_defined = 2; + FillWithConstant fill_with_constant = 3; + } +} diff --git a/executor/proto/tensorflow/core/protobuf/tpu/topology.proto b/executor/proto/tensorflow/core/protobuf/tpu/topology.proto new file mode 100644 index 0000000000..17064ee5a2 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/tpu/topology.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +package tensorflow.tpu; + +// Describes the geometry of a TPU mesh. +message TopologyProto { + // The dimensions of the TPU topology, in cores. Typically, this is a 3D + // topology [x, y, core], where the major dimensions correspond to TPU chips, + // and the minor dimension describes the number of cores on a multicore chip. + repeated int32 mesh_shape = 1; + + // Number of TensorFlow tasks in the cluster. + int32 num_tasks = 2; + + // Number of TPU devices per task. 
+ int32 num_tpu_devices_per_task = 3; + + // A flattened rank 3 int32 array with shape + // [num_tasks, num_tpu_devices_per_task, len(mesh_shape)]. + // `tasks` is the number of tasks in the TPU cluster, `devices` is the number + // of TPU devices per task, and the minor dimension corresponds to a position + // in the TPU mesh topology. Each entry [task, device, axis] gives the + // `axis`-th coordinate in the topology of a task/device pair. + repeated int32 device_coordinates = 4; +} diff --git a/executor/proto/tensorflow/core/protobuf/tpu/tpu_embedding_configuration.proto b/executor/proto/tensorflow/core/protobuf/tpu/tpu_embedding_configuration.proto new file mode 100644 index 0000000000..22be27795c --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/tpu/tpu_embedding_configuration.proto @@ -0,0 +1,95 @@ +syntax = "proto3"; + +package tensorflow.tpu; + +import "tensorflow/core/protobuf/tpu/optimization_parameters.proto"; +import "tensorflow/core/protobuf/tpu/tpu_embedding_output_layout.proto"; + +message TPUEmbeddingConfiguration { + // Description of the various embedding tables. + message TableDescriptor { + // Name of the table. + string name = 1; + // Size of the vocabulary (i.e., number of rows) in the table. + int32 vocabulary_size = 2; + // The embedding dimension (i.e., the width of the embedding table). + int32 dimension = 3; + // Number of features mapped to this table. + int32 num_features = 4; + // Details of the learning algorithm used to update the embedding + // parameters. + OptimizationParameters optimization_parameters = 5; + } + repeated TableDescriptor table_descriptor = 1; + + // Mode. Should the embedding layer program be run for inference (just forward + // pass), training (both forward and backward pass) or just the backward_pass. 
+ enum Mode { + UNSPECIFIED = 0; + INFERENCE = 1; + TRAINING = 2; + BACKWARD_PASS_ONLY = 3; + } + Mode mode = 2; + + // Number of samples in each batch of embedding layer activations sent to + // the TensorCore. + int32 batch_size_per_tensor_core = 3; + + // Number of TPU hosts used for inference/training. + int32 num_hosts = 4; + + // Number of TensorCore used for inference/training. + int32 num_tensor_cores = 5; + + // Sharding strategy of the embedding tables among the hosts. + // If the sharding_strategy is "mod", each id is assigned to host + // "id % num_hosts". For instance, 13 ids are split across 5 hosts as: + // [[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]. + // If the sharding_strategy is "div", ids are assigned to hosts in a + // contiguous manner. In this case, 13 ids are split across 5 hosts as: + // [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]. + // In both the strategies, if the id space does not evenly divide the number + // of hosts, each of the first "table_descriptor.vocabulary_size % num_hosts" + // hosts will be assigned one more id. + // This partitioning strategy exactly follows that in the embedding_lookup + // TensorFlow function at tensorflow/python/ops/embedding_ops.py. + enum ShardingStrategy { + DIV_DEFAULT = 0; + MOD = 1; + } + ShardingStrategy sharding_strategy = 6; + + // This parameter determines if the execution of the sparse core will be + // pipelined with that of the TensorCore. This parameter only affects results + // when mode=TRAINING. If mode=INFERENCE or BACKWARD_PASS_ONLY, this parameter + // does not affect execution and hence, is a don't care value. + // + // false: The execution of the sparse core is not pipelined with that of the + // TensorCore. The forward pass of every step on the sparse core is executed + // only after the backward pass of the previous step is complete. 
And the + // backward pass on the sparse core is executed only after the embedding + // gradients have been computed on the TensorCore on every step. This ensures + // that the activations on every step observe the gradient updates from the + // previous step on both the sparse core and the TensorCore. + // + // true: The execution of the sparse core is pipelined with that of the + // TensorCore. The forward pass of every step on the sparse core can be + // executed after the forward pass of the previous step is complete without + // waiting for the backward pass. This improves the utilization of the sparse + // core allowing it to process step N+1 while the embedding gradients for step + // N are computed on the TensorCore. The backward pass of every step on the + // sparse core is executed directly after the forward pass for the next step + // is complete. The drawback is that embedding activations for step N+1 do not + // observe the embedding gradient updates from step N. This could affect model + // quality if step N and N+1 involve the same set of embedding IDs. However, + // since the embedding updates are sparse, this is generally not considered a + // problem. + bool pipeline_execution_with_tensor_core = 7; + + // Extended output layout information; if not provided, a compatibility mode + // will use defaults that match the old layout. Providing a value for this + // field is EXPERIMENTAL and most ways of filling it will probably break. Do + // not set it unless you know what you are doing. 
+ TPUEmbeddingOutputLayout output_layout = 8; +} diff --git a/executor/proto/tensorflow/core/protobuf/tpu/tpu_embedding_output_layout.proto b/executor/proto/tensorflow/core/protobuf/tpu/tpu_embedding_output_layout.proto new file mode 100644 index 0000000000..aed30b2f22 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/tpu/tpu_embedding_output_layout.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; + +package tensorflow.tpu; + +// In the comments here, "layout" refers to the top-level EmbeddingOutputLayout +// proto contained in the TPUEmbeddingConfiguration. + +// The embedding output consists of a list of tensors, each specified by an +// EmbeddingOutputTensor proto within the EmbeddingOutputLayout (the "output" +// field). Each table and feature lookup is then placed into some number of +// particular positions within some output tensor (identified by "tensor_index" +// within OutputLocation). The tree of table lookups, feature lookups, and +// output locations is specified by the +// "table(table_id).feature(feature_id).output_location" repeated fields within +// EmbeddingOutputLayout. + +message TPUEmbeddingOutputLayout { + // Location of one copy of the feature's data. + message OutputLocation { + // Which output tensor this copy of the feature will go into. Must be + // between 0 and layout.output_size(). + int32 tensor_index = 1; + + // Offset in dimension 0 for this feature copy. Must be between 0 and + // layout.output(tensor_index).dim0_size_per_sample(). + int32 dim0_offset = 2; + + // Offset in dimension 1 for this feature copy. Must be between 0 and + // layout.output(tensor_index).dim1_size() - table width; repeated or + // partially/fully overlapping values are allowed and results in the same + // range will be summed (with the gradients replicated in the backward + // pass). + int32 dim1_offset = 3; + } + + // Description of the output placement for one feature. 
+ message FeatureDescriptor { + // Typically, only one copy of each feature is used, but multiple are + // allowed and the same data will be copied to all of them (with the + // gradients summed in the backward pass). + repeated OutputLocation output_location = 1; + } + + // Description of the output placement for features of one table. + message TableDescriptor { + // Output locations for each feature loaded from this table. + repeated FeatureDescriptor feature = 1; + } + // Output locations for each feature of each table. + repeated TableDescriptor table = 1; + + // Data layout and shape computation information for a single output tensor. + // Any unused locations in the tensor will be filled with zeros, and + // corresponding gradients will be ignored. + + // Size and layout information for 2-D tensors. + message TwoDOutputTensor { + // Multiplier for output dimension 0 size; used to match legacy format that + // stacks features within a sample in dimension 0. + int32 dim0_size_per_sample = 2; + + // The size (in dimension 1) of this output tensor. + int32 dim1_size = 1; + } + + // Format information for a single output tensor. + message EmbeddingOutputTensor { + oneof output_format { + TwoDOutputTensor two_d = 4; + } + } + + // Shape and layout information for each tensor. + repeated EmbeddingOutputTensor output = 2; +} diff --git a/executor/proto/tensorflow/core/protobuf/trace_events.pb.go b/executor/proto/tensorflow/core/protobuf/trace_events.pb.go new file mode 100644 index 0000000000..d68dacadd2 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/trace_events.pb.go @@ -0,0 +1,319 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/trace_events.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A 'Trace' contains metadata for the individual traces of a system. +type Trace struct { + // The devices that this trace has information about. Maps from device_id to + // more data about the specific device. + Devices map[uint32]*Device `protobuf:"bytes,1,rep,name=devices,proto3" json:"devices,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // All trace events capturing in the profiling period. + TraceEvents []*TraceEvent `protobuf:"bytes,4,rep,name=trace_events,json=traceEvents,proto3" json:"trace_events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Trace) Reset() { *m = Trace{} } +func (m *Trace) String() string { return proto.CompactTextString(m) } +func (*Trace) ProtoMessage() {} +func (*Trace) Descriptor() ([]byte, []int) { + return fileDescriptor_7a93bbdc8c2bd3ea, []int{0} +} + +func (m *Trace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Trace.Unmarshal(m, b) +} +func (m *Trace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Trace.Marshal(b, m, deterministic) +} +func (m *Trace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Trace.Merge(m, src) +} +func (m *Trace) XXX_Size() int { + return xxx_messageInfo_Trace.Size(m) +} +func (m *Trace) XXX_DiscardUnknown() { + xxx_messageInfo_Trace.DiscardUnknown(m) +} + +var xxx_messageInfo_Trace proto.InternalMessageInfo + +func (m *Trace) GetDevices() map[uint32]*Device { + if m != nil { + return m.Devices + } + return nil +} + +func 
(m *Trace) GetTraceEvents() []*TraceEvent { + if m != nil { + return m.TraceEvents + } + return nil +} + +// A 'device' is a physical entity in the system and is comprised of several +// resources. +type Device struct { + // The name of the device. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The id of this device, unique in a single trace. + DeviceId uint32 `protobuf:"varint,2,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"` + // The resources on this device, keyed by resource_id; + Resources map[uint32]*Resource `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Device) Reset() { *m = Device{} } +func (m *Device) String() string { return proto.CompactTextString(m) } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_7a93bbdc8c2bd3ea, []int{1} +} + +func (m *Device) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Device.Unmarshal(m, b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Device.Marshal(b, m, deterministic) +} +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) +} +func (m *Device) XXX_Size() int { + return xxx_messageInfo_Device.Size(m) +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *Device) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Device) GetDeviceId() uint32 { + if m != nil { + return m.DeviceId + } + return 0 +} + +func (m *Device) GetResources() map[uint32]*Resource { + if m != nil { + return m.Resources + } + return nil +} + +// A 'resource' generally 
is a specific computation component on a device. These +// can range from threads on CPUs to specific arithmetic units on hardware +// devices. +type Resource struct { + // The name of the resource. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The id of the resource. Unique within a device. + ResourceId uint32 `protobuf:"varint,2,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_7a93bbdc8c2bd3ea, []int{2} +} + +func (m *Resource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resource.Unmarshal(m, b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return xxx_messageInfo_Resource.Size(m) +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Resource) GetResourceId() uint32 { + if m != nil { + return m.ResourceId + } + return 0 +} + +type TraceEvent struct { + // The id of the device that this event occurred on. The full dataset should + // have this device present in the Trace object. + DeviceId uint32 `protobuf:"varint,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"` + // The id of the resource that this event occurred on. 
The full dataset should + // have this resource present in the Device object of the Trace object. A + // resource_id is unique on a specific device, but not necessarily within the + // trace. + ResourceId uint32 `protobuf:"varint,2,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + // The name of this trace event. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The timestamp that this event occurred at (in picos since tracing started). + TimestampPs uint64 `protobuf:"varint,9,opt,name=timestamp_ps,json=timestampPs,proto3" json:"timestamp_ps,omitempty"` + // The duration of the event in picoseconds if applicable. + // Events without duration are called instant events. + DurationPs uint64 `protobuf:"varint,10,opt,name=duration_ps,json=durationPs,proto3" json:"duration_ps,omitempty"` + // Extra arguments that will be displayed in trace view. + Args map[string]string `protobuf:"bytes,11,rep,name=args,proto3" json:"args,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceEvent) Reset() { *m = TraceEvent{} } +func (m *TraceEvent) String() string { return proto.CompactTextString(m) } +func (*TraceEvent) ProtoMessage() {} +func (*TraceEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_7a93bbdc8c2bd3ea, []int{3} +} + +func (m *TraceEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceEvent.Unmarshal(m, b) +} +func (m *TraceEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceEvent.Marshal(b, m, deterministic) +} +func (m *TraceEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceEvent.Merge(m, src) +} +func (m *TraceEvent) XXX_Size() int { + return xxx_messageInfo_TraceEvent.Size(m) +} +func (m *TraceEvent) XXX_DiscardUnknown() { + 
xxx_messageInfo_TraceEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceEvent proto.InternalMessageInfo + +func (m *TraceEvent) GetDeviceId() uint32 { + if m != nil { + return m.DeviceId + } + return 0 +} + +func (m *TraceEvent) GetResourceId() uint32 { + if m != nil { + return m.ResourceId + } + return 0 +} + +func (m *TraceEvent) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TraceEvent) GetTimestampPs() uint64 { + if m != nil { + return m.TimestampPs + } + return 0 +} + +func (m *TraceEvent) GetDurationPs() uint64 { + if m != nil { + return m.DurationPs + } + return 0 +} + +func (m *TraceEvent) GetArgs() map[string]string { + if m != nil { + return m.Args + } + return nil +} + +func init() { + proto.RegisterType((*Trace)(nil), "tensorflow.profiler.Trace") + proto.RegisterMapType((map[uint32]*Device)(nil), "tensorflow.profiler.Trace.DevicesEntry") + proto.RegisterType((*Device)(nil), "tensorflow.profiler.Device") + proto.RegisterMapType((map[uint32]*Resource)(nil), "tensorflow.profiler.Device.ResourcesEntry") + proto.RegisterType((*Resource)(nil), "tensorflow.profiler.Resource") + proto.RegisterType((*TraceEvent)(nil), "tensorflow.profiler.TraceEvent") + proto.RegisterMapType((map[string]string)(nil), "tensorflow.profiler.TraceEvent.ArgsEntry") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/trace_events.proto", fileDescriptor_7a93bbdc8c2bd3ea) +} + +var fileDescriptor_7a93bbdc8c2bd3ea = []byte{ + // 430 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0x66, 0x9a, 0xec, 0xba, 0x79, 0xe9, 0x8a, 0x8e, 0x1e, 0x42, 0x17, 0xd9, 0xd8, 0x8b, 0x51, + 0x21, 0xc5, 0xee, 0x41, 0x11, 0x44, 0xb6, 0xb8, 0xe0, 0xde, 0xc2, 0x20, 0x78, 0xf0, 0x50, 0xd2, + 0x66, 0x52, 0x42, 0x9b, 0x4c, 0x99, 0x99, 0xb4, 0xf4, 0xaf, 0xf8, 0xeb, 0xbc, 0xf9, 0x17, 0x3c, + 0xca, 0xcc, 0x98, 0x64, 0x2a, 0xd1, 0xde, 0x1e, 0x5f, 0xbe, 0xf7, 0xbd, 0xf7, 
0x7d, 0x2f, 0x03, + 0xaf, 0x25, 0xad, 0x04, 0xe3, 0xf9, 0x86, 0xed, 0x27, 0x4b, 0xc6, 0xe9, 0x64, 0xcb, 0x99, 0x64, + 0x8b, 0x3a, 0x9f, 0x48, 0x9e, 0x2e, 0xe9, 0x9c, 0xee, 0x68, 0x25, 0x45, 0xac, 0x51, 0xfc, 0xa4, + 0x23, 0x2b, 0x24, 0x2f, 0x36, 0x94, 0x8f, 0x7f, 0x22, 0x38, 0xfb, 0xa2, 0xb8, 0xf8, 0x16, 0x1e, + 0x64, 0x74, 0x57, 0x2c, 0xa9, 0x08, 0x50, 0xe8, 0x44, 0xfe, 0xf4, 0x45, 0xdc, 0xd3, 0x10, 0x6b, + 0x72, 0xfc, 0xc9, 0x30, 0xef, 0x2a, 0xc9, 0x0f, 0xa4, 0xe9, 0xc3, 0x33, 0x18, 0xda, 0x73, 0x03, + 0x57, 0xeb, 0x5c, 0xff, 0x5b, 0xe7, 0x4e, 0xf1, 0x88, 0x2f, 0xdb, 0x5a, 0x8c, 0xbe, 0xc2, 0xd0, + 0x16, 0xc7, 0x8f, 0xc0, 0x59, 0xd3, 0x43, 0x80, 0x42, 0x14, 0x5d, 0x12, 0x55, 0xe2, 0x37, 0x70, + 0xb6, 0x4b, 0x37, 0x35, 0x0d, 0x06, 0x21, 0x8a, 0xfc, 0xe9, 0x55, 0xaf, 0xbc, 0xd1, 0x20, 0x86, + 0xf9, 0x7e, 0xf0, 0x0e, 0x8d, 0x7f, 0x20, 0x38, 0x37, 0x28, 0xc6, 0xe0, 0x56, 0x69, 0x49, 0xb5, + 0xa8, 0x47, 0x74, 0x8d, 0xaf, 0xc0, 0x33, 0x36, 0xe6, 0x45, 0xa6, 0x95, 0x2f, 0xc9, 0x85, 0x01, + 0xee, 0x33, 0xfc, 0x19, 0x3c, 0x4e, 0x05, 0xab, 0xb9, 0x4a, 0xc7, 0xd1, 0xae, 0x5e, 0xfd, 0x67, + 0x6c, 0x4c, 0x1a, 0xb2, 0x09, 0xa8, 0x6b, 0x1e, 0x7d, 0x83, 0x87, 0xc7, 0x1f, 0x7b, 0x0c, 0xde, + 0x1c, 0x1b, 0x7c, 0xd6, 0x3b, 0xa9, 0x51, 0xb1, 0x2d, 0x7e, 0x84, 0x8b, 0x06, 0xee, 0xf5, 0x78, + 0x0d, 0x7e, 0xb3, 0x49, 0xe7, 0x12, 0x1a, 0xe8, 0x3e, 0x1b, 0x7f, 0x1f, 0x00, 0x74, 0x87, 0x39, + 0xce, 0x04, 0xfd, 0x95, 0xc9, 0x29, 0xb1, 0x76, 0x03, 0xc7, 0xda, 0xe0, 0x39, 0x0c, 0x65, 0x51, + 0x52, 0x21, 0xd3, 0x72, 0x3b, 0xdf, 0x8a, 0xc0, 0x0b, 0x51, 0xe4, 0x12, 0xbf, 0xc5, 0x12, 0xa1, + 0x74, 0xb3, 0x9a, 0xa7, 0xb2, 0x60, 0x95, 0x62, 0x80, 0x66, 0x40, 0x03, 0x25, 0x02, 0x7f, 0x00, + 0x37, 0xe5, 0x2b, 0x11, 0xf8, 0xfa, 0x0e, 0x2f, 0x4f, 0xfc, 0x5d, 0xf1, 0x2d, 0x5f, 0xfd, 0x39, + 0x83, 0x6e, 0x1b, 0xbd, 0x05, 0xaf, 0x85, 0xec, 0xf0, 0x3d, 0x13, 0xfe, 0x53, 0x3b, 0x7c, 0xcf, + 0x4a, 0x77, 0x36, 0x85, 0x80, 0xf1, 0x95, 0x3d, 0x2e, 0xe7, 0x69, 0x49, 0xf7, 0x8c, 0xaf, 0x67, + 0x8f, 0xbb, 0x81, 
0x22, 0x51, 0xaf, 0x4d, 0x24, 0xe8, 0x17, 0x42, 0x8b, 0x73, 0xfd, 0xf4, 0x6e, + 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x72, 0xbe, 0x49, 0xa9, 0x03, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/trace_events.proto b/executor/proto/tensorflow/core/protobuf/trace_events.proto new file mode 100644 index 0000000000..76b7300aea --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/trace_events.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package tensorflow.profiler; + +option cc_enable_arenas = true; +option java_outer_classname = "TraceEventsProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; + +// A 'Trace' contains metadata for the individual traces of a system. +message Trace { + // The devices that this trace has information about. Maps from device_id to + // more data about the specific device. + map devices = 1; + + // All trace events capturing in the profiling period. + repeated TraceEvent trace_events = 4; +} + +// A 'device' is a physical entity in the system and is comprised of several +// resources. +message Device { + // The name of the device. + string name = 1; + + // The id of this device, unique in a single trace. + uint32 device_id = 2; + + // The resources on this device, keyed by resource_id; + map resources = 3; +} + +// A 'resource' generally is a specific computation component on a device. These +// can range from threads on CPUs to specific arithmetic units on hardware +// devices. +message Resource { + // The name of the resource. + string name = 1; + + // The id of the resource. Unique within a device. + uint32 resource_id = 2; +} + +message TraceEvent { + // The id of the device that this event occurred on. The full dataset should + // have this device present in the Trace object. + uint32 device_id = 1; + + // The id of the resource that this event occurred on. The full dataset should + // have this resource present in the Device object of the Trace object. 
A + // resource_id is unique on a specific device, but not necessarily within the + // trace. + uint32 resource_id = 2; + + // The name of this trace event. + string name = 3; + + // The timestamp that this event occurred at (in picos since tracing started). + uint64 timestamp_ps = 9; + + // The duration of the event in picoseconds if applicable. + // Events without duration are called instant events. + uint64 duration_ps = 10; + + // Extra arguments that will be displayed in trace view. + map args = 11; +} diff --git a/executor/proto/tensorflow/core/protobuf/trackable_object_graph.pb.go b/executor/proto/tensorflow/core/protobuf/trackable_object_graph.pb.go new file mode 100644 index 0000000000..afe4a7b347 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/trackable_object_graph.pb.go @@ -0,0 +1,357 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/trackable_object_graph.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type TrackableObjectGraph struct { + Nodes []*TrackableObjectGraph_TrackableObject `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TrackableObjectGraph) Reset() { *m = TrackableObjectGraph{} } +func (m *TrackableObjectGraph) String() string { return proto.CompactTextString(m) } +func (*TrackableObjectGraph) ProtoMessage() {} +func (*TrackableObjectGraph) Descriptor() ([]byte, []int) { + return fileDescriptor_120a5309f807e789, []int{0} +} + +func (m *TrackableObjectGraph) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TrackableObjectGraph.Unmarshal(m, b) +} +func (m *TrackableObjectGraph) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TrackableObjectGraph.Marshal(b, m, deterministic) +} +func (m *TrackableObjectGraph) XXX_Merge(src proto.Message) { + xxx_messageInfo_TrackableObjectGraph.Merge(m, src) +} +func (m *TrackableObjectGraph) XXX_Size() int { + return xxx_messageInfo_TrackableObjectGraph.Size(m) +} +func (m *TrackableObjectGraph) XXX_DiscardUnknown() { + xxx_messageInfo_TrackableObjectGraph.DiscardUnknown(m) +} + +var xxx_messageInfo_TrackableObjectGraph proto.InternalMessageInfo + +func (m *TrackableObjectGraph) GetNodes() []*TrackableObjectGraph_TrackableObject { + if m != nil { + return m.Nodes + } + return nil +} + +type TrackableObjectGraph_TrackableObject struct { + // Objects which this object depends on. + Children []*TrackableObjectGraph_TrackableObject_ObjectReference `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"` + // Serialized data specific to this object. + Attributes []*TrackableObjectGraph_TrackableObject_SerializedTensor `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + // Slot variables owned by this object. 
+ SlotVariables []*TrackableObjectGraph_TrackableObject_SlotVariableReference `protobuf:"bytes,3,rep,name=slot_variables,json=slotVariables,proto3" json:"slot_variables,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TrackableObjectGraph_TrackableObject) Reset() { *m = TrackableObjectGraph_TrackableObject{} } +func (m *TrackableObjectGraph_TrackableObject) String() string { return proto.CompactTextString(m) } +func (*TrackableObjectGraph_TrackableObject) ProtoMessage() {} +func (*TrackableObjectGraph_TrackableObject) Descriptor() ([]byte, []int) { + return fileDescriptor_120a5309f807e789, []int{0, 0} +} + +func (m *TrackableObjectGraph_TrackableObject) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject.Unmarshal(m, b) +} +func (m *TrackableObjectGraph_TrackableObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject.Marshal(b, m, deterministic) +} +func (m *TrackableObjectGraph_TrackableObject) XXX_Merge(src proto.Message) { + xxx_messageInfo_TrackableObjectGraph_TrackableObject.Merge(m, src) +} +func (m *TrackableObjectGraph_TrackableObject) XXX_Size() int { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject.Size(m) +} +func (m *TrackableObjectGraph_TrackableObject) XXX_DiscardUnknown() { + xxx_messageInfo_TrackableObjectGraph_TrackableObject.DiscardUnknown(m) +} + +var xxx_messageInfo_TrackableObjectGraph_TrackableObject proto.InternalMessageInfo + +func (m *TrackableObjectGraph_TrackableObject) GetChildren() []*TrackableObjectGraph_TrackableObject_ObjectReference { + if m != nil { + return m.Children + } + return nil +} + +func (m *TrackableObjectGraph_TrackableObject) GetAttributes() []*TrackableObjectGraph_TrackableObject_SerializedTensor { + if m != nil { + return m.Attributes + } + return nil +} + +func (m 
*TrackableObjectGraph_TrackableObject) GetSlotVariables() []*TrackableObjectGraph_TrackableObject_SlotVariableReference { + if m != nil { + return m.SlotVariables + } + return nil +} + +type TrackableObjectGraph_TrackableObject_ObjectReference struct { + // An index into `TrackableObjectGraph.nodes`, indicating the object + // being referenced. + NodeId int32 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // A user-provided name for the edge. + LocalName string `protobuf:"bytes,2,opt,name=local_name,json=localName,proto3" json:"local_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TrackableObjectGraph_TrackableObject_ObjectReference) Reset() { + *m = TrackableObjectGraph_TrackableObject_ObjectReference{} +} +func (m *TrackableObjectGraph_TrackableObject_ObjectReference) String() string { + return proto.CompactTextString(m) +} +func (*TrackableObjectGraph_TrackableObject_ObjectReference) ProtoMessage() {} +func (*TrackableObjectGraph_TrackableObject_ObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_120a5309f807e789, []int{0, 0, 0} +} + +func (m *TrackableObjectGraph_TrackableObject_ObjectReference) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject_ObjectReference.Unmarshal(m, b) +} +func (m *TrackableObjectGraph_TrackableObject_ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject_ObjectReference.Marshal(b, m, deterministic) +} +func (m *TrackableObjectGraph_TrackableObject_ObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TrackableObjectGraph_TrackableObject_ObjectReference.Merge(m, src) +} +func (m *TrackableObjectGraph_TrackableObject_ObjectReference) XXX_Size() int { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject_ObjectReference.Size(m) +} +func (m 
*TrackableObjectGraph_TrackableObject_ObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_TrackableObjectGraph_TrackableObject_ObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_TrackableObjectGraph_TrackableObject_ObjectReference proto.InternalMessageInfo + +func (m *TrackableObjectGraph_TrackableObject_ObjectReference) GetNodeId() int32 { + if m != nil { + return m.NodeId + } + return 0 +} + +func (m *TrackableObjectGraph_TrackableObject_ObjectReference) GetLocalName() string { + if m != nil { + return m.LocalName + } + return "" +} + +type TrackableObjectGraph_TrackableObject_SerializedTensor struct { + // A name for the Tensor. Simple variables have only one + // `SerializedTensor` named "VARIABLE_VALUE" by convention. This value may + // be restored on object creation as an optimization. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The full name of the variable/tensor, if applicable. Used to allow + // name-based loading of checkpoints which were saved using an + // object-based API. Should match the checkpoint key which would have been + // assigned by tf.train.Saver. + FullName string `protobuf:"bytes,2,opt,name=full_name,json=fullName,proto3" json:"full_name,omitempty"` + // The generated name of the Tensor in the checkpoint. + CheckpointKey string `protobuf:"bytes,3,opt,name=checkpoint_key,json=checkpointKey,proto3" json:"checkpoint_key,omitempty"` + // Whether checkpoints should be considered as matching even without this + // value restored. Used for non-critical values which don't affect the + // TensorFlow graph, such as layer configurations. 
+ OptionalRestore bool `protobuf:"varint,4,opt,name=optional_restore,json=optionalRestore,proto3" json:"optional_restore,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) Reset() { + *m = TrackableObjectGraph_TrackableObject_SerializedTensor{} +} +func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) String() string { + return proto.CompactTextString(m) +} +func (*TrackableObjectGraph_TrackableObject_SerializedTensor) ProtoMessage() {} +func (*TrackableObjectGraph_TrackableObject_SerializedTensor) Descriptor() ([]byte, []int) { + return fileDescriptor_120a5309f807e789, []int{0, 0, 1} +} + +func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject_SerializedTensor.Unmarshal(m, b) +} +func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject_SerializedTensor.Marshal(b, m, deterministic) +} +func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_Merge(src proto.Message) { + xxx_messageInfo_TrackableObjectGraph_TrackableObject_SerializedTensor.Merge(m, src) +} +func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_Size() int { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject_SerializedTensor.Size(m) +} +func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_DiscardUnknown() { + xxx_messageInfo_TrackableObjectGraph_TrackableObject_SerializedTensor.DiscardUnknown(m) +} + +var xxx_messageInfo_TrackableObjectGraph_TrackableObject_SerializedTensor proto.InternalMessageInfo + +func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m 
*TrackableObjectGraph_TrackableObject_SerializedTensor) GetFullName() string { + if m != nil { + return m.FullName + } + return "" +} + +func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) GetCheckpointKey() string { + if m != nil { + return m.CheckpointKey + } + return "" +} + +func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) GetOptionalRestore() bool { + if m != nil { + return m.OptionalRestore + } + return false +} + +type TrackableObjectGraph_TrackableObject_SlotVariableReference struct { + // An index into `TrackableObjectGraph.nodes`, indicating the + // variable object this slot was created for. + OriginalVariableNodeId int32 `protobuf:"varint,1,opt,name=original_variable_node_id,json=originalVariableNodeId,proto3" json:"original_variable_node_id,omitempty"` + // The name of the slot (e.g. "m"/"v"). + SlotName string `protobuf:"bytes,2,opt,name=slot_name,json=slotName,proto3" json:"slot_name,omitempty"` + // An index into `TrackableObjectGraph.nodes`, indicating the + // `Object` with the value of the slot variable. 
+ SlotVariableNodeId int32 `protobuf:"varint,3,opt,name=slot_variable_node_id,json=slotVariableNodeId,proto3" json:"slot_variable_node_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) Reset() { + *m = TrackableObjectGraph_TrackableObject_SlotVariableReference{} +} +func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) String() string { + return proto.CompactTextString(m) +} +func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) ProtoMessage() {} +func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) Descriptor() ([]byte, []int) { + return fileDescriptor_120a5309f807e789, []int{0, 0, 2} +} + +func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject_SlotVariableReference.Unmarshal(m, b) +} +func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject_SlotVariableReference.Marshal(b, m, deterministic) +} +func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TrackableObjectGraph_TrackableObject_SlotVariableReference.Merge(m, src) +} +func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_Size() int { + return xxx_messageInfo_TrackableObjectGraph_TrackableObject_SlotVariableReference.Size(m) +} +func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_DiscardUnknown() { + xxx_messageInfo_TrackableObjectGraph_TrackableObject_SlotVariableReference.DiscardUnknown(m) +} + +var xxx_messageInfo_TrackableObjectGraph_TrackableObject_SlotVariableReference proto.InternalMessageInfo + +func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) 
GetOriginalVariableNodeId() int32 { + if m != nil { + return m.OriginalVariableNodeId + } + return 0 +} + +func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) GetSlotName() string { + if m != nil { + return m.SlotName + } + return "" +} + +func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) GetSlotVariableNodeId() int32 { + if m != nil { + return m.SlotVariableNodeId + } + return 0 +} + +func init() { + proto.RegisterType((*TrackableObjectGraph)(nil), "tensorflow.TrackableObjectGraph") + proto.RegisterType((*TrackableObjectGraph_TrackableObject)(nil), "tensorflow.TrackableObjectGraph.TrackableObject") + proto.RegisterType((*TrackableObjectGraph_TrackableObject_ObjectReference)(nil), "tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference") + proto.RegisterType((*TrackableObjectGraph_TrackableObject_SerializedTensor)(nil), "tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor") + proto.RegisterType((*TrackableObjectGraph_TrackableObject_SlotVariableReference)(nil), "tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/trackable_object_graph.proto", fileDescriptor_120a5309f807e789) +} + +var fileDescriptor_120a5309f807e789 = []byte{ + // 430 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4f, 0x6b, 0x13, 0x41, + 0x18, 0xc6, 0x99, 0x6e, 0x13, 0x93, 0x57, 0xda, 0x94, 0xc1, 0xea, 0x9a, 0x22, 0x04, 0x41, 0x88, + 0x97, 0xc4, 0x3f, 0x78, 0xf0, 0xa6, 0x1e, 0x2a, 0x45, 0xa8, 0xb0, 0x16, 0x4f, 0xc2, 0x30, 0x3b, + 0xfb, 0x6e, 0x33, 0x66, 0xb2, 0xb3, 0xcc, 0x4c, 0x94, 0xfa, 0x3d, 0xfc, 0x02, 0x1e, 0xfd, 0x26, + 0x7e, 0x23, 0x8f, 0x65, 0x66, 0xb3, 0xdd, 0xcd, 0x92, 0x4b, 0x6e, 0xc9, 0x33, 0xcf, 0xf3, 0x7b, + 0x79, 0xde, 0x97, 0x85, 0x37, 0x0e, 0x0b, 0xab, 0x4d, 0xae, 0xf4, 0xcf, 0xb9, 0xd0, 0x06, 0xe7, + 0xa5, 0xd1, 0x4e, 0xa7, 0xeb, 0x7c, 0xee, 0x0c, 0x17, 0x4b, 0x9e, 0x2a, 
0x64, 0x3a, 0xfd, 0x8e, + 0xc2, 0xb1, 0x6b, 0xc3, 0xcb, 0xc5, 0x2c, 0xbc, 0x53, 0x68, 0x62, 0x4f, 0xff, 0xf6, 0xe1, 0xc1, + 0x55, 0x6d, 0xfe, 0x1c, 0xbc, 0x1f, 0xbd, 0x95, 0x9e, 0x43, 0xaf, 0xd0, 0x19, 0xda, 0x98, 0x4c, + 0xa2, 0xe9, 0xfd, 0x57, 0x2f, 0x66, 0x4d, 0x68, 0xb6, 0x2b, 0xd0, 0x15, 0x93, 0x2a, 0x3e, 0xfe, + 0xd7, 0x83, 0x51, 0xe7, 0x89, 0x7e, 0x83, 0x81, 0x58, 0x48, 0x95, 0x19, 0x2c, 0x36, 0xf8, 0x77, + 0xfb, 0xe2, 0x67, 0x9b, 0x29, 0x98, 0xa3, 0xc1, 0x42, 0x60, 0x72, 0x47, 0xa4, 0x1c, 0x80, 0x3b, + 0x67, 0x64, 0xba, 0x76, 0x68, 0xe3, 0x83, 0xc0, 0x7f, 0xbf, 0x37, 0xff, 0x0b, 0x1a, 0xc9, 0x95, + 0xfc, 0x85, 0xd9, 0x55, 0x48, 0x26, 0x2d, 0x28, 0x5d, 0xc1, 0xb1, 0x55, 0xda, 0xb1, 0x1f, 0xdc, + 0x48, 0x9f, 0xb1, 0x71, 0x14, 0xc6, 0x9c, 0xef, 0x3f, 0x46, 0x69, 0xf7, 0x75, 0x43, 0x69, 0xca, + 0x1c, 0xd9, 0x96, 0x6c, 0xc7, 0x17, 0x30, 0xea, 0xd4, 0xa5, 0x8f, 0xe0, 0x9e, 0xdf, 0x2f, 0x93, + 0x59, 0x4c, 0x26, 0x64, 0xda, 0x4b, 0xfa, 0xfe, 0xef, 0x45, 0x46, 0x9f, 0x00, 0x28, 0x2d, 0xb8, + 0x62, 0x05, 0x5f, 0x61, 0x7c, 0x30, 0x21, 0xd3, 0x61, 0x32, 0x0c, 0xca, 0x25, 0x5f, 0xe1, 0xf8, + 0x37, 0x81, 0x93, 0x6e, 0x35, 0x4a, 0xe1, 0x30, 0xb8, 0x49, 0x70, 0x87, 0xdf, 0xf4, 0x0c, 0x86, + 0xf9, 0x5a, 0x6d, 0x61, 0x06, 0x5e, 0xf0, 0x14, 0xfa, 0x0c, 0x8e, 0xc5, 0x02, 0xc5, 0xb2, 0xd4, + 0xb2, 0x70, 0x6c, 0x89, 0x37, 0x71, 0x14, 0x1c, 0x47, 0x8d, 0xfa, 0x09, 0x6f, 0xe8, 0x73, 0x38, + 0xd1, 0xa5, 0x93, 0xba, 0xe0, 0x8a, 0x19, 0xb4, 0x4e, 0x1b, 0x8c, 0x0f, 0x27, 0x64, 0x3a, 0x48, + 0x46, 0xb5, 0x9e, 0x54, 0xf2, 0xf8, 0x0f, 0x81, 0xd3, 0x9d, 0xbb, 0xa0, 0x6f, 0xe1, 0xb1, 0x36, + 0xf2, 0x5a, 0x7a, 0x48, 0xbd, 0x6f, 0xb6, 0xdd, 0xfd, 0x61, 0x6d, 0xa8, 0xd3, 0x97, 0xd5, 0x2e, + 0xce, 0x60, 0x18, 0xce, 0xd4, 0xee, 0xe0, 0x85, 0xd0, 0xe1, 0x25, 0x9c, 0x6e, 0xdd, 0xf0, 0x8e, + 0x19, 0x05, 0x26, 0x6d, 0x9f, 0xa0, 0xe2, 0x7d, 0x88, 0xfe, 0x13, 0x92, 0xf6, 0xc3, 0x47, 0xf4, + 0xfa, 0x36, 0x00, 0x00, 0xff, 0xff, 0x38, 0x69, 0x1f, 0x59, 0x7d, 0x03, 0x00, 0x00, +} diff --git 
a/executor/proto/tensorflow/core/protobuf/trackable_object_graph.proto b/executor/proto/tensorflow/core/protobuf/trackable_object_graph.proto new file mode 100644 index 0000000000..02d852e6f3 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/trackable_object_graph.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +package tensorflow; + +// A TensorBundle addition which saves extra information about the objects which +// own variables, allowing for more robust checkpoint loading into modified +// programs. + +message TrackableObjectGraph { + message TrackableObject { + message ObjectReference { + // An index into `TrackableObjectGraph.nodes`, indicating the object + // being referenced. + int32 node_id = 1; + // A user-provided name for the edge. + string local_name = 2; + } + + message SerializedTensor { + // A name for the Tensor. Simple variables have only one + // `SerializedTensor` named "VARIABLE_VALUE" by convention. This value may + // be restored on object creation as an optimization. + string name = 1; + // The full name of the variable/tensor, if applicable. Used to allow + // name-based loading of checkpoints which were saved using an + // object-based API. Should match the checkpoint key which would have been + // assigned by tf.train.Saver. + string full_name = 2; + // The generated name of the Tensor in the checkpoint. + string checkpoint_key = 3; + // Whether checkpoints should be considered as matching even without this + // value restored. Used for non-critical values which don't affect the + // TensorFlow graph, such as layer configurations. + bool optional_restore = 4; + } + + message SlotVariableReference { + // An index into `TrackableObjectGraph.nodes`, indicating the + // variable object this slot was created for. + int32 original_variable_node_id = 1; + // The name of the slot (e.g. "m"/"v"). 
+ string slot_name = 2; + // An index into `TrackableObjectGraph.nodes`, indicating the + // `Object` with the value of the slot variable. + int32 slot_variable_node_id = 3; + } + + // Objects which this object depends on. + repeated ObjectReference children = 1; + // Serialized data specific to this object. + repeated SerializedTensor attributes = 2; + // Slot variables owned by this object. + repeated SlotVariableReference slot_variables = 3; + } + + repeated TrackableObject nodes = 1; +} diff --git a/executor/proto/tensorflow/core/protobuf/transport_options.pb.go b/executor/proto/tensorflow/core/protobuf/transport_options.pb.go new file mode 100644 index 0000000000..3a543cb3f3 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/transport_options.pb.go @@ -0,0 +1,81 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/transport_options.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Extra data needed on a non-RDMA RecvBufResponse. 
+type RecvBufRespExtra struct { + TensorContent [][]byte `protobuf:"bytes,1,rep,name=tensor_content,json=tensorContent,proto3" json:"tensor_content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecvBufRespExtra) Reset() { *m = RecvBufRespExtra{} } +func (m *RecvBufRespExtra) String() string { return proto.CompactTextString(m) } +func (*RecvBufRespExtra) ProtoMessage() {} +func (*RecvBufRespExtra) Descriptor() ([]byte, []int) { + return fileDescriptor_527891df7bab7653, []int{0} +} + +func (m *RecvBufRespExtra) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecvBufRespExtra.Unmarshal(m, b) +} +func (m *RecvBufRespExtra) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecvBufRespExtra.Marshal(b, m, deterministic) +} +func (m *RecvBufRespExtra) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecvBufRespExtra.Merge(m, src) +} +func (m *RecvBufRespExtra) XXX_Size() int { + return xxx_messageInfo_RecvBufRespExtra.Size(m) +} +func (m *RecvBufRespExtra) XXX_DiscardUnknown() { + xxx_messageInfo_RecvBufRespExtra.DiscardUnknown(m) +} + +var xxx_messageInfo_RecvBufRespExtra proto.InternalMessageInfo + +func (m *RecvBufRespExtra) GetTensorContent() [][]byte { + if m != nil { + return m.TensorContent + } + return nil +} + +func init() { + proto.RegisterType((*RecvBufRespExtra)(nil), "tensorflow.RecvBufRespExtra") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/transport_options.proto", fileDescriptor_527891df7bab7653) +} + +var fileDescriptor_527891df7bab7653 = []byte{ + // 127 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x28, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0xd7, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x2f, 0x28, 0xca, 0x2f, + 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x2e, 0xc8, 0x2f, 0x2a, 0x89, 0xcf, + 0x2f, 0x28, 
0xc9, 0xcc, 0xcf, 0x2b, 0xd6, 0x03, 0x4b, 0x09, 0x71, 0x21, 0x74, 0x28, 0x59, 0x72, + 0x09, 0x04, 0xa5, 0x26, 0x97, 0x39, 0x95, 0xa6, 0x05, 0xa5, 0x16, 0x17, 0xb8, 0x56, 0x94, 0x14, + 0x25, 0x0a, 0xa9, 0x72, 0xf1, 0x41, 0x54, 0xc4, 0x27, 0xe7, 0xe7, 0x95, 0xa4, 0xe6, 0x95, 0x48, + 0x30, 0x2a, 0x30, 0x6b, 0xf0, 0x04, 0xf1, 0x42, 0x44, 0x9d, 0x21, 0x82, 0x49, 0x6c, 0x60, 0xd3, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x78, 0x65, 0xc5, 0x6c, 0x81, 0x00, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/transport_options.proto b/executor/proto/tensorflow/core/protobuf/transport_options.proto new file mode 100644 index 0000000000..1d32475e9b --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/transport_options.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package tensorflow; + +// Extra data needed on a non-RDMA RecvBufResponse. +message RecvBufRespExtra { + repeated bytes tensor_content = 1; +}; diff --git a/executor/proto/tensorflow/core/protobuf/verifier_config.pb.go b/executor/proto/tensorflow/core/protobuf/verifier_config.pb.go new file mode 100644 index 0000000000..9376d064c5 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/verifier_config.pb.go @@ -0,0 +1,130 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/verifier_config.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type VerifierConfig_Toggle int32 + +const ( + VerifierConfig_DEFAULT VerifierConfig_Toggle = 0 + VerifierConfig_ON VerifierConfig_Toggle = 1 + VerifierConfig_OFF VerifierConfig_Toggle = 2 +) + +var VerifierConfig_Toggle_name = map[int32]string{ + 0: "DEFAULT", + 1: "ON", + 2: "OFF", +} + +var VerifierConfig_Toggle_value = map[string]int32{ + "DEFAULT": 0, + "ON": 1, + "OFF": 2, +} + +func (x VerifierConfig_Toggle) String() string { + return proto.EnumName(VerifierConfig_Toggle_name, int32(x)) +} + +func (VerifierConfig_Toggle) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5049fcf5d8bb3c3c, []int{0, 0} +} + +// The config for graph verifiers. +type VerifierConfig struct { + // Deadline for completion of all verification i.e. all the Toggle ON + // verifiers must complete execution within this time. + VerificationTimeoutInMs int64 `protobuf:"varint,1,opt,name=verification_timeout_in_ms,json=verificationTimeoutInMs,proto3" json:"verification_timeout_in_ms,omitempty"` + // Perform structural validation on a tensorflow graph. Default is OFF. 
+ StructureVerifier VerifierConfig_Toggle `protobuf:"varint,2,opt,name=structure_verifier,json=structureVerifier,proto3,enum=tensorflow.VerifierConfig_Toggle" json:"structure_verifier,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VerifierConfig) Reset() { *m = VerifierConfig{} } +func (m *VerifierConfig) String() string { return proto.CompactTextString(m) } +func (*VerifierConfig) ProtoMessage() {} +func (*VerifierConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_5049fcf5d8bb3c3c, []int{0} +} + +func (m *VerifierConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VerifierConfig.Unmarshal(m, b) +} +func (m *VerifierConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VerifierConfig.Marshal(b, m, deterministic) +} +func (m *VerifierConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifierConfig.Merge(m, src) +} +func (m *VerifierConfig) XXX_Size() int { + return xxx_messageInfo_VerifierConfig.Size(m) +} +func (m *VerifierConfig) XXX_DiscardUnknown() { + xxx_messageInfo_VerifierConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_VerifierConfig proto.InternalMessageInfo + +func (m *VerifierConfig) GetVerificationTimeoutInMs() int64 { + if m != nil { + return m.VerificationTimeoutInMs + } + return 0 +} + +func (m *VerifierConfig) GetStructureVerifier() VerifierConfig_Toggle { + if m != nil { + return m.StructureVerifier + } + return VerifierConfig_DEFAULT +} + +func init() { + proto.RegisterEnum("tensorflow.VerifierConfig_Toggle", VerifierConfig_Toggle_name, VerifierConfig_Toggle_value) + proto.RegisterType((*VerifierConfig)(nil), "tensorflow.VerifierConfig") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/verifier_config.proto", fileDescriptor_5049fcf5d8bb3c3c) +} + +var fileDescriptor_5049fcf5d8bb3c3c = []byte{ + // 261 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xf3, 0x40, + 0x14, 0x85, 0xff, 0x49, 0x21, 0x85, 0xfb, 0x43, 0x89, 0x83, 0x60, 0x70, 0x55, 0xbb, 0x90, 0xae, + 0x12, 0xd0, 0xa5, 0x6e, 0xac, 0x1a, 0x10, 0xd4, 0x86, 0x12, 0x5d, 0xb8, 0x09, 0x4d, 0x98, 0x19, + 0x07, 0x9b, 0xb9, 0x72, 0x67, 0xc6, 0x3e, 0xa2, 0xaf, 0xe4, 0x52, 0x4c, 0xac, 0x26, 0xe0, 0x6e, + 0x86, 0xf3, 0x1d, 0xbe, 0xcb, 0x81, 0xc4, 0x09, 0x63, 0x91, 0xe4, 0x06, 0xb7, 0x69, 0x8d, 0x24, + 0xd2, 0x57, 0x42, 0x87, 0x95, 0x97, 0xe9, 0x9b, 0x20, 0x2d, 0xb5, 0xa0, 0xb2, 0x46, 0x23, 0xb5, + 0x4a, 0xda, 0x80, 0xc3, 0x2f, 0x3f, 0x7b, 0x67, 0x30, 0x79, 0xfc, 0xa6, 0x2e, 0x5b, 0x88, 0x9f, + 0xc1, 0x61, 0xd7, 0xab, 0xd7, 0x4e, 0xa3, 0x29, 0x9d, 0x6e, 0x04, 0x7a, 0x57, 0x6a, 0x53, 0x36, + 0x36, 0x66, 0x53, 0x36, 0x1f, 0xad, 0x0e, 0xfa, 0x44, 0xd1, 0x01, 0x37, 0xe6, 0xce, 0xf2, 0x1c, + 0xb8, 0x75, 0xe4, 0x6b, 0xe7, 0x49, 0x94, 0x3b, 0x7d, 0x1c, 0x4c, 0xd9, 0x7c, 0x72, 0x72, 0xd4, + 0x3b, 0x34, 0x19, 0x4a, 0x93, 0x02, 0x95, 0xda, 0x88, 0xd5, 0xde, 0x4f, 0x79, 0x97, 0xcf, 0x8e, + 0x21, 0xec, 0x42, 0xfe, 0x1f, 0xc6, 0x57, 0xd7, 0xd9, 0xc5, 0xc3, 0x6d, 0x11, 0xfd, 0xe3, 0x21, + 0x04, 0xcb, 0xfb, 0x88, 0xf1, 0x31, 0x8c, 0x96, 0x59, 0x16, 0x05, 0x0b, 0x0b, 0x31, 0x92, 0xea, + 0x2b, 0x24, 0xad, 0x1b, 0xb1, 0x45, 0x7a, 0x59, 0xec, 0x0f, 0x6d, 0xf9, 0xd7, 0x0c, 0x36, 0x67, + 0x4f, 0xe7, 0x4a, 0xbb, 0x67, 0x5f, 0x25, 0x35, 0x36, 0x69, 0x6f, 0xc4, 0xbf, 0x9f, 0x0a, 0x87, + 0xeb, 0x7e, 0x30, 0x56, 0x85, 0xed, 0xe7, 0xf4, 0x33, 0x00, 0x00, 0xff, 0xff, 0x2b, 0xd2, 0x51, + 0x2c, 0x83, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/verifier_config.proto b/executor/proto/tensorflow/core/protobuf/verifier_config.proto new file mode 100644 index 0000000000..5a1373b1cc --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/verifier_config.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "VerifierConfigProtos"; 
+option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; + +// The config for graph verifiers. +message VerifierConfig { + enum Toggle { + DEFAULT = 0; + ON = 1; + OFF = 2; + } + + // Deadline for completion of all verification i.e. all the Toggle ON + // verifiers must complete execution within this time. + int64 verification_timeout_in_ms = 1; + + // Perform structural validation on a tensorflow graph. Default is OFF. + Toggle structure_verifier = 2; + + // Next tag: 3 +} diff --git a/executor/proto/tensorflow/core/protobuf/worker.pb.go b/executor/proto/tensorflow/core/protobuf/worker.pb.go new file mode 100644 index 0000000000..0985a43635 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/worker.pb.go @@ -0,0 +1,2390 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow/core/protobuf/worker.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + core "github.com/tensorflow/tensorflow/tensorflow/go/core/lib/core" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type GetStatusRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetStatusRequest) Reset() { *m = GetStatusRequest{} } +func (m *GetStatusRequest) String() string { return proto.CompactTextString(m) } +func (*GetStatusRequest) ProtoMessage() {} +func (*GetStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{0} +} + +func (m *GetStatusRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetStatusRequest.Unmarshal(m, b) +} +func (m *GetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetStatusRequest.Marshal(b, m, deterministic) +} +func (m *GetStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetStatusRequest.Merge(m, src) +} +func (m *GetStatusRequest) XXX_Size() int { + return xxx_messageInfo_GetStatusRequest.Size(m) +} +func (m *GetStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetStatusRequest proto.InternalMessageInfo + +type GetStatusResponse struct { + DeviceAttributes []*framework.DeviceAttributes `protobuf:"bytes,1,rep,name=device_attributes,json=deviceAttributes,proto3" json:"device_attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetStatusResponse) Reset() { *m = GetStatusResponse{} } +func (m *GetStatusResponse) String() string { return proto.CompactTextString(m) } +func (*GetStatusResponse) ProtoMessage() {} +func (*GetStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{1} +} + +func (m *GetStatusResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetStatusResponse.Unmarshal(m, b) +} +func (m *GetStatusResponse) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetStatusResponse.Marshal(b, m, deterministic) +} +func (m *GetStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetStatusResponse.Merge(m, src) +} +func (m *GetStatusResponse) XXX_Size() int { + return xxx_messageInfo_GetStatusResponse.Size(m) +} +func (m *GetStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetStatusResponse proto.InternalMessageInfo + +func (m *GetStatusResponse) GetDeviceAttributes() []*framework.DeviceAttributes { + if m != nil { + return m.DeviceAttributes + } + return nil +} + +type CreateWorkerSessionRequest struct { + // Sessions are identified by a given handle. + SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // Defines the configuration of a TensorFlow worker. + ServerDef *ServerDef `protobuf:"bytes,2,opt,name=server_def,json=serverDef,proto3" json:"server_def,omitempty"` + // If true, any resources such as Variables used in the session will not be + // shared with other sessions. + IsolateSessionState bool `protobuf:"varint,3,opt,name=isolate_session_state,json=isolateSessionState,proto3" json:"isolate_session_state,omitempty"` + // The device attributes of all the devices in the cluster. 
+ ClusterDeviceAttributes []*framework.DeviceAttributes `protobuf:"bytes,4,rep,name=cluster_device_attributes,json=clusterDeviceAttributes,proto3" json:"cluster_device_attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateWorkerSessionRequest) Reset() { *m = CreateWorkerSessionRequest{} } +func (m *CreateWorkerSessionRequest) String() string { return proto.CompactTextString(m) } +func (*CreateWorkerSessionRequest) ProtoMessage() {} +func (*CreateWorkerSessionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{2} +} + +func (m *CreateWorkerSessionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateWorkerSessionRequest.Unmarshal(m, b) +} +func (m *CreateWorkerSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateWorkerSessionRequest.Marshal(b, m, deterministic) +} +func (m *CreateWorkerSessionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateWorkerSessionRequest.Merge(m, src) +} +func (m *CreateWorkerSessionRequest) XXX_Size() int { + return xxx_messageInfo_CreateWorkerSessionRequest.Size(m) +} +func (m *CreateWorkerSessionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateWorkerSessionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateWorkerSessionRequest proto.InternalMessageInfo + +func (m *CreateWorkerSessionRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +func (m *CreateWorkerSessionRequest) GetServerDef() *ServerDef { + if m != nil { + return m.ServerDef + } + return nil +} + +func (m *CreateWorkerSessionRequest) GetIsolateSessionState() bool { + if m != nil { + return m.IsolateSessionState + } + return false +} + +func (m *CreateWorkerSessionRequest) GetClusterDeviceAttributes() []*framework.DeviceAttributes { + if m != nil { + return m.ClusterDeviceAttributes + } + return nil 
+} + +type CreateWorkerSessionResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateWorkerSessionResponse) Reset() { *m = CreateWorkerSessionResponse{} } +func (m *CreateWorkerSessionResponse) String() string { return proto.CompactTextString(m) } +func (*CreateWorkerSessionResponse) ProtoMessage() {} +func (*CreateWorkerSessionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{3} +} + +func (m *CreateWorkerSessionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateWorkerSessionResponse.Unmarshal(m, b) +} +func (m *CreateWorkerSessionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateWorkerSessionResponse.Marshal(b, m, deterministic) +} +func (m *CreateWorkerSessionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateWorkerSessionResponse.Merge(m, src) +} +func (m *CreateWorkerSessionResponse) XXX_Size() int { + return xxx_messageInfo_CreateWorkerSessionResponse.Size(m) +} +func (m *CreateWorkerSessionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateWorkerSessionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateWorkerSessionResponse proto.InternalMessageInfo + +type DeleteWorkerSessionRequest struct { + // Sessions are identified by a given handle. 
+ SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteWorkerSessionRequest) Reset() { *m = DeleteWorkerSessionRequest{} } +func (m *DeleteWorkerSessionRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteWorkerSessionRequest) ProtoMessage() {} +func (*DeleteWorkerSessionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{4} +} + +func (m *DeleteWorkerSessionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteWorkerSessionRequest.Unmarshal(m, b) +} +func (m *DeleteWorkerSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteWorkerSessionRequest.Marshal(b, m, deterministic) +} +func (m *DeleteWorkerSessionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteWorkerSessionRequest.Merge(m, src) +} +func (m *DeleteWorkerSessionRequest) XXX_Size() int { + return xxx_messageInfo_DeleteWorkerSessionRequest.Size(m) +} +func (m *DeleteWorkerSessionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteWorkerSessionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteWorkerSessionRequest proto.InternalMessageInfo + +func (m *DeleteWorkerSessionRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +type DeleteWorkerSessionResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteWorkerSessionResponse) Reset() { *m = DeleteWorkerSessionResponse{} } +func (m *DeleteWorkerSessionResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteWorkerSessionResponse) ProtoMessage() {} +func (*DeleteWorkerSessionResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_f24b6dc95cbd078c, []int{5} +} + +func (m *DeleteWorkerSessionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteWorkerSessionResponse.Unmarshal(m, b) +} +func (m *DeleteWorkerSessionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteWorkerSessionResponse.Marshal(b, m, deterministic) +} +func (m *DeleteWorkerSessionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteWorkerSessionResponse.Merge(m, src) +} +func (m *DeleteWorkerSessionResponse) XXX_Size() int { + return xxx_messageInfo_DeleteWorkerSessionResponse.Size(m) +} +func (m *DeleteWorkerSessionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteWorkerSessionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteWorkerSessionResponse proto.InternalMessageInfo + +type RegisterGraphRequest struct { + // Subgraphs are scoped within one session. + SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // Set to true if `CreateWorkerSession` was called for `session_handle`. + CreateWorkerSessionCalled bool `protobuf:"varint,6,opt,name=create_worker_session_called,json=createWorkerSessionCalled,proto3" json:"create_worker_session_called,omitempty"` + // "graph_def" has the subgraph of nodes for this worker, with each node + // having its device_name filled in. + GraphDef *framework.GraphDef `protobuf:"bytes,2,opt,name=graph_def,json=graphDef,proto3" json:"graph_def,omitempty"` + // True iff the graph (before partitioning) contains control flow nodes. + // + // As of 01/11/2015, this is no longer set by clients. + HasControlFlow bool `protobuf:"varint,3,opt,name=has_control_flow,json=hasControlFlow,proto3" json:"has_control_flow,omitempty"` // Deprecated: Do not use. + // Configuration options for the session in which this graph was created. 
+ GraphOptions *GraphOptions `protobuf:"bytes,4,opt,name=graph_options,json=graphOptions,proto3" json:"graph_options,omitempty"` + // Field(s) used by TensorFlow Debugger (tfdbg). + DebugOptions *DebugOptions `protobuf:"bytes,5,opt,name=debug_options,json=debugOptions,proto3" json:"debug_options,omitempty"` + // If graph_def contains any collective ops this must be a positive + // integer used to coordinate execution with other graphs. All + // graphs in a distributed execution with the same + // collective_graph_key will coordinate to use the same step_id + // concurrently so that BufRendezvous entries will make the correct + // values accessible. + CollectiveGraphKey int64 `protobuf:"varint,7,opt,name=collective_graph_key,json=collectiveGraphKey,proto3" json:"collective_graph_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegisterGraphRequest) Reset() { *m = RegisterGraphRequest{} } +func (m *RegisterGraphRequest) String() string { return proto.CompactTextString(m) } +func (*RegisterGraphRequest) ProtoMessage() {} +func (*RegisterGraphRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{6} +} + +func (m *RegisterGraphRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RegisterGraphRequest.Unmarshal(m, b) +} +func (m *RegisterGraphRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RegisterGraphRequest.Marshal(b, m, deterministic) +} +func (m *RegisterGraphRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterGraphRequest.Merge(m, src) +} +func (m *RegisterGraphRequest) XXX_Size() int { + return xxx_messageInfo_RegisterGraphRequest.Size(m) +} +func (m *RegisterGraphRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterGraphRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RegisterGraphRequest proto.InternalMessageInfo + +func (m *RegisterGraphRequest) 
GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +func (m *RegisterGraphRequest) GetCreateWorkerSessionCalled() bool { + if m != nil { + return m.CreateWorkerSessionCalled + } + return false +} + +func (m *RegisterGraphRequest) GetGraphDef() *framework.GraphDef { + if m != nil { + return m.GraphDef + } + return nil +} + +// Deprecated: Do not use. +func (m *RegisterGraphRequest) GetHasControlFlow() bool { + if m != nil { + return m.HasControlFlow + } + return false +} + +func (m *RegisterGraphRequest) GetGraphOptions() *GraphOptions { + if m != nil { + return m.GraphOptions + } + return nil +} + +func (m *RegisterGraphRequest) GetDebugOptions() *DebugOptions { + if m != nil { + return m.DebugOptions + } + return nil +} + +func (m *RegisterGraphRequest) GetCollectiveGraphKey() int64 { + if m != nil { + return m.CollectiveGraphKey + } + return 0 +} + +type RegisterGraphResponse struct { + // If the registration succeeds, returns an opaque graph_handle to + // the master. The master calls RunGraph with graph_handle to + // compute different steps. 
+ GraphHandle string `protobuf:"bytes,1,opt,name=graph_handle,json=graphHandle,proto3" json:"graph_handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegisterGraphResponse) Reset() { *m = RegisterGraphResponse{} } +func (m *RegisterGraphResponse) String() string { return proto.CompactTextString(m) } +func (*RegisterGraphResponse) ProtoMessage() {} +func (*RegisterGraphResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{7} +} + +func (m *RegisterGraphResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RegisterGraphResponse.Unmarshal(m, b) +} +func (m *RegisterGraphResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RegisterGraphResponse.Marshal(b, m, deterministic) +} +func (m *RegisterGraphResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterGraphResponse.Merge(m, src) +} +func (m *RegisterGraphResponse) XXX_Size() int { + return xxx_messageInfo_RegisterGraphResponse.Size(m) +} +func (m *RegisterGraphResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterGraphResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RegisterGraphResponse proto.InternalMessageInfo + +func (m *RegisterGraphResponse) GetGraphHandle() string { + if m != nil { + return m.GraphHandle + } + return "" +} + +type DeregisterGraphRequest struct { + // The session_handle used when registering the graph. If session_handle is + // empty, a single global namespace is used. + SessionHandle string `protobuf:"bytes,2,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // Set to true if `CreateWorkerSession` was called for `session_handle`. 
+ CreateWorkerSessionCalled bool `protobuf:"varint,3,opt,name=create_worker_session_called,json=createWorkerSessionCalled,proto3" json:"create_worker_session_called,omitempty"` + // REQUIRED: graph_handle must be returned by a RegisterGraph call + // to the same WorkerService. + GraphHandle string `protobuf:"bytes,1,opt,name=graph_handle,json=graphHandle,proto3" json:"graph_handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeregisterGraphRequest) Reset() { *m = DeregisterGraphRequest{} } +func (m *DeregisterGraphRequest) String() string { return proto.CompactTextString(m) } +func (*DeregisterGraphRequest) ProtoMessage() {} +func (*DeregisterGraphRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{8} +} + +func (m *DeregisterGraphRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeregisterGraphRequest.Unmarshal(m, b) +} +func (m *DeregisterGraphRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeregisterGraphRequest.Marshal(b, m, deterministic) +} +func (m *DeregisterGraphRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeregisterGraphRequest.Merge(m, src) +} +func (m *DeregisterGraphRequest) XXX_Size() int { + return xxx_messageInfo_DeregisterGraphRequest.Size(m) +} +func (m *DeregisterGraphRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeregisterGraphRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeregisterGraphRequest proto.InternalMessageInfo + +func (m *DeregisterGraphRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +func (m *DeregisterGraphRequest) GetCreateWorkerSessionCalled() bool { + if m != nil { + return m.CreateWorkerSessionCalled + } + return false +} + +func (m *DeregisterGraphRequest) GetGraphHandle() string { + if m != nil { + return m.GraphHandle + } + return "" +} + +type 
DeregisterGraphResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeregisterGraphResponse) Reset() { *m = DeregisterGraphResponse{} } +func (m *DeregisterGraphResponse) String() string { return proto.CompactTextString(m) } +func (*DeregisterGraphResponse) ProtoMessage() {} +func (*DeregisterGraphResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{9} +} + +func (m *DeregisterGraphResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeregisterGraphResponse.Unmarshal(m, b) +} +func (m *DeregisterGraphResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeregisterGraphResponse.Marshal(b, m, deterministic) +} +func (m *DeregisterGraphResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeregisterGraphResponse.Merge(m, src) +} +func (m *DeregisterGraphResponse) XXX_Size() int { + return xxx_messageInfo_DeregisterGraphResponse.Size(m) +} +func (m *DeregisterGraphResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeregisterGraphResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeregisterGraphResponse proto.InternalMessageInfo + +type CleanupAllRequest struct { + // A list of container names. + // + // If 'container' is not empty, releases resources in the given + // containers in all devices. + // + // If 'container' is empty, releases resources in the default + // container in all devices. 
+ Container []string `protobuf:"bytes,1,rep,name=container,proto3" json:"container,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CleanupAllRequest) Reset() { *m = CleanupAllRequest{} } +func (m *CleanupAllRequest) String() string { return proto.CompactTextString(m) } +func (*CleanupAllRequest) ProtoMessage() {} +func (*CleanupAllRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{10} +} + +func (m *CleanupAllRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CleanupAllRequest.Unmarshal(m, b) +} +func (m *CleanupAllRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CleanupAllRequest.Marshal(b, m, deterministic) +} +func (m *CleanupAllRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CleanupAllRequest.Merge(m, src) +} +func (m *CleanupAllRequest) XXX_Size() int { + return xxx_messageInfo_CleanupAllRequest.Size(m) +} +func (m *CleanupAllRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CleanupAllRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CleanupAllRequest proto.InternalMessageInfo + +func (m *CleanupAllRequest) GetContainer() []string { + if m != nil { + return m.Container + } + return nil +} + +type CleanupAllResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CleanupAllResponse) Reset() { *m = CleanupAllResponse{} } +func (m *CleanupAllResponse) String() string { return proto.CompactTextString(m) } +func (*CleanupAllResponse) ProtoMessage() {} +func (*CleanupAllResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{11} +} + +func (m *CleanupAllResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CleanupAllResponse.Unmarshal(m, b) +} +func (m *CleanupAllResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
return xxx_messageInfo_CleanupAllResponse.Marshal(b, m, deterministic) +} +func (m *CleanupAllResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CleanupAllResponse.Merge(m, src) +} +func (m *CleanupAllResponse) XXX_Size() int { + return xxx_messageInfo_CleanupAllResponse.Size(m) +} +func (m *CleanupAllResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CleanupAllResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CleanupAllResponse proto.InternalMessageInfo + +// Options specific to the execution of a single step. +type ExecutorOpts struct { + RecordCosts bool `protobuf:"varint,1,opt,name=record_costs,json=recordCosts,proto3" json:"record_costs,omitempty"` + RecordTimeline bool `protobuf:"varint,3,opt,name=record_timeline,json=recordTimeline,proto3" json:"record_timeline,omitempty"` + RecordPartitionGraphs bool `protobuf:"varint,4,opt,name=record_partition_graphs,json=recordPartitionGraphs,proto3" json:"record_partition_graphs,omitempty"` + ReportTensorAllocationsUponOom bool `protobuf:"varint,5,opt,name=report_tensor_allocations_upon_oom,json=reportTensorAllocationsUponOom,proto3" json:"report_tensor_allocations_upon_oom,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecutorOpts) Reset() { *m = ExecutorOpts{} } +func (m *ExecutorOpts) String() string { return proto.CompactTextString(m) } +func (*ExecutorOpts) ProtoMessage() {} +func (*ExecutorOpts) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{12} +} + +func (m *ExecutorOpts) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecutorOpts.Unmarshal(m, b) +} +func (m *ExecutorOpts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecutorOpts.Marshal(b, m, deterministic) +} +func (m *ExecutorOpts) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecutorOpts.Merge(m, src) +} +func (m *ExecutorOpts) XXX_Size() int { + return 
xxx_messageInfo_ExecutorOpts.Size(m) +} +func (m *ExecutorOpts) XXX_DiscardUnknown() { + xxx_messageInfo_ExecutorOpts.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecutorOpts proto.InternalMessageInfo + +func (m *ExecutorOpts) GetRecordCosts() bool { + if m != nil { + return m.RecordCosts + } + return false +} + +func (m *ExecutorOpts) GetRecordTimeline() bool { + if m != nil { + return m.RecordTimeline + } + return false +} + +func (m *ExecutorOpts) GetRecordPartitionGraphs() bool { + if m != nil { + return m.RecordPartitionGraphs + } + return false +} + +func (m *ExecutorOpts) GetReportTensorAllocationsUponOom() bool { + if m != nil { + return m.ReportTensorAllocationsUponOom + } + return false +} + +type RunGraphRequest struct { + // session_handle is the master-generated unique id for this session. + // If session_handle is non-empty, it must be the same as used when + // registering the graph. If it is empty, a single global namespace is used to + // search for the graph_handle. + SessionHandle string `protobuf:"bytes,8,opt,name=session_handle,json=sessionHandle,proto3" json:"session_handle,omitempty"` + // Set to true if `CreateWorkerSession` was called for `session_handle`. + CreateWorkerSessionCalled bool `protobuf:"varint,10,opt,name=create_worker_session_called,json=createWorkerSessionCalled,proto3" json:"create_worker_session_called,omitempty"` + // REQUIRED: graph_handle must be returned by a RegisterGraph call + // to the same WorkerService. + GraphHandle string `protobuf:"bytes,1,opt,name=graph_handle,json=graphHandle,proto3" json:"graph_handle,omitempty"` + // A unique ID to distinguish different runs of the same graph. + // + // The master generates a global unique `step_id` to distinguish + // different runs of the graph computation. Subgraphs communicate + // (e.g., send/recv ops) with each other using `step_id` to + // distinguish tensors generated by different runs. 
+ StepId int64 `protobuf:"varint,2,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"` + // Options for this step. + ExecOpts *ExecutorOpts `protobuf:"bytes,5,opt,name=exec_opts,json=execOpts,proto3" json:"exec_opts,omitempty"` + // Runs the graph. + // + // Sends the tensors in "send" into the graph before the run and + // fetches the keys into `RunGraphResponse.recv` after the run. + Send []*NamedTensorProto `protobuf:"bytes,3,rep,name=send,proto3" json:"send,omitempty"` + RecvKey []string `protobuf:"bytes,4,rep,name=recv_key,json=recvKey,proto3" json:"recv_key,omitempty"` + // True if the RunGraphRequest is a partial run request. + IsPartial bool `protobuf:"varint,6,opt,name=is_partial,json=isPartial,proto3" json:"is_partial,omitempty"` + // True if this is the last partial run request in a sequence of requests. + IsLastPartialRun bool `protobuf:"varint,7,opt,name=is_last_partial_run,json=isLastPartialRun,proto3" json:"is_last_partial_run,omitempty"` + // If true then some errors, e.g., execution errors that have long + // error messages, may return an OK RunGraphResponse with the actual + // error saved in the status_code/status_error_message fields of the + // response body. This is a workaround since the RPC subsystem may + // truncate long metadata messages. + StoreErrorsInResponseBody bool `protobuf:"varint,9,opt,name=store_errors_in_response_body,json=storeErrorsInResponseBody,proto3" json:"store_errors_in_response_body,omitempty"` + // Unique identifier for this request. Every RunGraphRequest must have a + // unique request_id, and retried RunGraphRequests must have the same + // request_id. If request_id is zero, retry detection is disabled. + // + // Retried RunGraphRequests are problematic because they may issue a + // RecvTensor that will have no corresponding sender and will wait forever. + // Workers use request_ids to reject retried RunGraph requests instead of + // waiting forever. 
+ RequestId int64 `protobuf:"varint,11,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunGraphRequest) Reset() { *m = RunGraphRequest{} } +func (m *RunGraphRequest) String() string { return proto.CompactTextString(m) } +func (*RunGraphRequest) ProtoMessage() {} +func (*RunGraphRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{13} +} + +func (m *RunGraphRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunGraphRequest.Unmarshal(m, b) +} +func (m *RunGraphRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunGraphRequest.Marshal(b, m, deterministic) +} +func (m *RunGraphRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunGraphRequest.Merge(m, src) +} +func (m *RunGraphRequest) XXX_Size() int { + return xxx_messageInfo_RunGraphRequest.Size(m) +} +func (m *RunGraphRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RunGraphRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RunGraphRequest proto.InternalMessageInfo + +func (m *RunGraphRequest) GetSessionHandle() string { + if m != nil { + return m.SessionHandle + } + return "" +} + +func (m *RunGraphRequest) GetCreateWorkerSessionCalled() bool { + if m != nil { + return m.CreateWorkerSessionCalled + } + return false +} + +func (m *RunGraphRequest) GetGraphHandle() string { + if m != nil { + return m.GraphHandle + } + return "" +} + +func (m *RunGraphRequest) GetStepId() int64 { + if m != nil { + return m.StepId + } + return 0 +} + +func (m *RunGraphRequest) GetExecOpts() *ExecutorOpts { + if m != nil { + return m.ExecOpts + } + return nil +} + +func (m *RunGraphRequest) GetSend() []*NamedTensorProto { + if m != nil { + return m.Send + } + return nil +} + +func (m *RunGraphRequest) GetRecvKey() []string { + if m != nil { + return m.RecvKey + } + return nil +} + +func 
(m *RunGraphRequest) GetIsPartial() bool { + if m != nil { + return m.IsPartial + } + return false +} + +func (m *RunGraphRequest) GetIsLastPartialRun() bool { + if m != nil { + return m.IsLastPartialRun + } + return false +} + +func (m *RunGraphRequest) GetStoreErrorsInResponseBody() bool { + if m != nil { + return m.StoreErrorsInResponseBody + } + return false +} + +func (m *RunGraphRequest) GetRequestId() int64 { + if m != nil { + return m.RequestId + } + return 0 +} + +type RunGraphResponse struct { + // A list of tensors corresponding to those requested by + // `RunGraphRequest.recv_key`. + Recv []*NamedTensorProto `protobuf:"bytes,1,rep,name=recv,proto3" json:"recv,omitempty"` + // If the request asked for execution stats, the cost graph, or the partition + // graphs, these are returned here. + // TODO(suharshs): Package these in a RunMetadata instead. + StepStats *framework.StepStats `protobuf:"bytes,2,opt,name=step_stats,json=stepStats,proto3" json:"step_stats,omitempty"` + CostGraph *framework.CostGraphDef `protobuf:"bytes,3,opt,name=cost_graph,json=costGraph,proto3" json:"cost_graph,omitempty"` + PartitionGraph []*framework.GraphDef `protobuf:"bytes,4,rep,name=partition_graph,json=partitionGraph,proto3" json:"partition_graph,omitempty"` + // If store_errors_in_response_body is true in the request, then + // optionally the server may return an OK status for the RPC and + // fill the true status into the fields below, to allow for messages + // that are too long to fit in metadata. 
+ StatusCode core.Code `protobuf:"varint,5,opt,name=status_code,json=statusCode,proto3,enum=tensorflow.error.Code" json:"status_code,omitempty"` + StatusErrorMessage string `protobuf:"bytes,6,opt,name=status_error_message,json=statusErrorMessage,proto3" json:"status_error_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunGraphResponse) Reset() { *m = RunGraphResponse{} } +func (m *RunGraphResponse) String() string { return proto.CompactTextString(m) } +func (*RunGraphResponse) ProtoMessage() {} +func (*RunGraphResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{14} +} + +func (m *RunGraphResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunGraphResponse.Unmarshal(m, b) +} +func (m *RunGraphResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunGraphResponse.Marshal(b, m, deterministic) +} +func (m *RunGraphResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunGraphResponse.Merge(m, src) +} +func (m *RunGraphResponse) XXX_Size() int { + return xxx_messageInfo_RunGraphResponse.Size(m) +} +func (m *RunGraphResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RunGraphResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RunGraphResponse proto.InternalMessageInfo + +func (m *RunGraphResponse) GetRecv() []*NamedTensorProto { + if m != nil { + return m.Recv + } + return nil +} + +func (m *RunGraphResponse) GetStepStats() *framework.StepStats { + if m != nil { + return m.StepStats + } + return nil +} + +func (m *RunGraphResponse) GetCostGraph() *framework.CostGraphDef { + if m != nil { + return m.CostGraph + } + return nil +} + +func (m *RunGraphResponse) GetPartitionGraph() []*framework.GraphDef { + if m != nil { + return m.PartitionGraph + } + return nil +} + +func (m *RunGraphResponse) GetStatusCode() core.Code { + if m != nil { + return m.StatusCode + } + return 
core.Code_OK +} + +func (m *RunGraphResponse) GetStatusErrorMessage() string { + if m != nil { + return m.StatusErrorMessage + } + return "" +} + +type CleanupGraphRequest struct { + StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CleanupGraphRequest) Reset() { *m = CleanupGraphRequest{} } +func (m *CleanupGraphRequest) String() string { return proto.CompactTextString(m) } +func (*CleanupGraphRequest) ProtoMessage() {} +func (*CleanupGraphRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{15} +} + +func (m *CleanupGraphRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CleanupGraphRequest.Unmarshal(m, b) +} +func (m *CleanupGraphRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CleanupGraphRequest.Marshal(b, m, deterministic) +} +func (m *CleanupGraphRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CleanupGraphRequest.Merge(m, src) +} +func (m *CleanupGraphRequest) XXX_Size() int { + return xxx_messageInfo_CleanupGraphRequest.Size(m) +} +func (m *CleanupGraphRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CleanupGraphRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CleanupGraphRequest proto.InternalMessageInfo + +func (m *CleanupGraphRequest) GetStepId() int64 { + if m != nil { + return m.StepId + } + return 0 +} + +type CleanupGraphResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CleanupGraphResponse) Reset() { *m = CleanupGraphResponse{} } +func (m *CleanupGraphResponse) String() string { return proto.CompactTextString(m) } +func (*CleanupGraphResponse) ProtoMessage() {} +func (*CleanupGraphResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{16} +} + 
+func (m *CleanupGraphResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CleanupGraphResponse.Unmarshal(m, b) +} +func (m *CleanupGraphResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CleanupGraphResponse.Marshal(b, m, deterministic) +} +func (m *CleanupGraphResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CleanupGraphResponse.Merge(m, src) +} +func (m *CleanupGraphResponse) XXX_Size() int { + return xxx_messageInfo_CleanupGraphResponse.Size(m) +} +func (m *CleanupGraphResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CleanupGraphResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CleanupGraphResponse proto.InternalMessageInfo + +type RecvTensorRequest struct { + // The step in which the tensor will be produced. + // + // REQUIRED: This must eventually correspond to the `step_id` passed + // into a RunGraph call on the same WorkerService. + StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"` + // A key identifying the channel to receive tensors from. A RecvTensor request + // retrieves one tensor from the channel, but multiple tensors can be sent and + // received over the same channel with multiple RecvTensor requests. See + // rendezvous.h for details. + RendezvousKey string `protobuf:"bytes,2,opt,name=rendezvous_key,json=rendezvousKey,proto3" json:"rendezvous_key,omitempty"` + // If true, use an out-of-band DMA mechanism to transfer the + // received tensor. + DmaOk bool `protobuf:"varint,3,opt,name=dma_ok,json=dmaOk,proto3" json:"dma_ok,omitempty"` + // Optional information on client-side device locality. + ClientLocality *framework.DeviceLocality `protobuf:"bytes,4,opt,name=client_locality,json=clientLocality,proto3" json:"client_locality,omitempty"` + // Optional information on server-side device locality. 
+ ServerLocality *framework.DeviceLocality `protobuf:"bytes,5,opt,name=server_locality,json=serverLocality,proto3" json:"server_locality,omitempty"` + // Optional information needed by the RPC subsystem. + TransportOptions *any.Any `protobuf:"bytes,6,opt,name=transport_options,json=transportOptions,proto3" json:"transport_options,omitempty"` + // Unique identifier for this request. Every RecvTensorRequest must have a + // unique request_id, and retried RecvTensorRequests must have the same + // request_id. If request_id is zero, retry detection and response cache + // are disabled. + // + // Retried RecvTensorRequests are problematic because a RecvTensor with no + // corresponding sender will wait forever, and the tensor may have been + // delivered to a previous retry. Workers use request_ids to reject retried + // RecvTensor requests instead of waiting forever. + RequestId int64 `protobuf:"varint,7,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecvTensorRequest) Reset() { *m = RecvTensorRequest{} } +func (m *RecvTensorRequest) String() string { return proto.CompactTextString(m) } +func (*RecvTensorRequest) ProtoMessage() {} +func (*RecvTensorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{17} +} + +func (m *RecvTensorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecvTensorRequest.Unmarshal(m, b) +} +func (m *RecvTensorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecvTensorRequest.Marshal(b, m, deterministic) +} +func (m *RecvTensorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecvTensorRequest.Merge(m, src) +} +func (m *RecvTensorRequest) XXX_Size() int { + return xxx_messageInfo_RecvTensorRequest.Size(m) +} +func (m *RecvTensorRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_RecvTensorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RecvTensorRequest proto.InternalMessageInfo + +func (m *RecvTensorRequest) GetStepId() int64 { + if m != nil { + return m.StepId + } + return 0 +} + +func (m *RecvTensorRequest) GetRendezvousKey() string { + if m != nil { + return m.RendezvousKey + } + return "" +} + +func (m *RecvTensorRequest) GetDmaOk() bool { + if m != nil { + return m.DmaOk + } + return false +} + +func (m *RecvTensorRequest) GetClientLocality() *framework.DeviceLocality { + if m != nil { + return m.ClientLocality + } + return nil +} + +func (m *RecvTensorRequest) GetServerLocality() *framework.DeviceLocality { + if m != nil { + return m.ServerLocality + } + return nil +} + +func (m *RecvTensorRequest) GetTransportOptions() *any.Any { + if m != nil { + return m.TransportOptions + } + return nil +} + +func (m *RecvTensorRequest) GetRequestId() int64 { + if m != nil { + return m.RequestId + } + return 0 +} + +type RecvTensorResponse struct { + // The tensor as a proto. + Tensor *framework.TensorProto `protobuf:"bytes,1,opt,name=tensor,proto3" json:"tensor,omitempty"` + // If true, this tensor was the output of a dead node, and the + // content is invalid. + IsDead bool `protobuf:"varint,2,opt,name=is_dead,json=isDead,proto3" json:"is_dead,omitempty"` + // The time at which tensor was available and started to be returned. + SendStartMicros int64 `protobuf:"varint,3,opt,name=send_start_micros,json=sendStartMicros,proto3" json:"send_start_micros,omitempty"` + // Optional additional information about how to receive the tensor, + // e.g. in the event that `RecvTensorRequest.dma_ok` was true. + TransportOptions *any.Any `protobuf:"bytes,4,opt,name=transport_options,json=transportOptions,proto3" json:"transport_options,omitempty"` + // Whether the receiver should send a MarkRecvFinishedRequest to the sender + // to ack the message. 
+ RequireAck bool `protobuf:"varint,5,opt,name=require_ack,json=requireAck,proto3" json:"require_ack,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecvTensorResponse) Reset() { *m = RecvTensorResponse{} } +func (m *RecvTensorResponse) String() string { return proto.CompactTextString(m) } +func (*RecvTensorResponse) ProtoMessage() {} +func (*RecvTensorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{18} +} + +func (m *RecvTensorResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecvTensorResponse.Unmarshal(m, b) +} +func (m *RecvTensorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecvTensorResponse.Marshal(b, m, deterministic) +} +func (m *RecvTensorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecvTensorResponse.Merge(m, src) +} +func (m *RecvTensorResponse) XXX_Size() int { + return xxx_messageInfo_RecvTensorResponse.Size(m) +} +func (m *RecvTensorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RecvTensorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RecvTensorResponse proto.InternalMessageInfo + +func (m *RecvTensorResponse) GetTensor() *framework.TensorProto { + if m != nil { + return m.Tensor + } + return nil +} + +func (m *RecvTensorResponse) GetIsDead() bool { + if m != nil { + return m.IsDead + } + return false +} + +func (m *RecvTensorResponse) GetSendStartMicros() int64 { + if m != nil { + return m.SendStartMicros + } + return 0 +} + +func (m *RecvTensorResponse) GetTransportOptions() *any.Any { + if m != nil { + return m.TransportOptions + } + return nil +} + +func (m *RecvTensorResponse) GetRequireAck() bool { + if m != nil { + return m.RequireAck + } + return false +} + +// Message for managing the response cache maintained on the sender side. +// Currently only used by the gRPC worker service. 
+type MarkRecvFinishedRequest struct { + RequestId int64 `protobuf:"varint,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MarkRecvFinishedRequest) Reset() { *m = MarkRecvFinishedRequest{} } +func (m *MarkRecvFinishedRequest) String() string { return proto.CompactTextString(m) } +func (*MarkRecvFinishedRequest) ProtoMessage() {} +func (*MarkRecvFinishedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{19} +} + +func (m *MarkRecvFinishedRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MarkRecvFinishedRequest.Unmarshal(m, b) +} +func (m *MarkRecvFinishedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MarkRecvFinishedRequest.Marshal(b, m, deterministic) +} +func (m *MarkRecvFinishedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MarkRecvFinishedRequest.Merge(m, src) +} +func (m *MarkRecvFinishedRequest) XXX_Size() int { + return xxx_messageInfo_MarkRecvFinishedRequest.Size(m) +} +func (m *MarkRecvFinishedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MarkRecvFinishedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MarkRecvFinishedRequest proto.InternalMessageInfo + +func (m *MarkRecvFinishedRequest) GetRequestId() int64 { + if m != nil { + return m.RequestId + } + return 0 +} + +type MarkRecvFinishedResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MarkRecvFinishedResponse) Reset() { *m = MarkRecvFinishedResponse{} } +func (m *MarkRecvFinishedResponse) String() string { return proto.CompactTextString(m) } +func (*MarkRecvFinishedResponse) ProtoMessage() {} +func (*MarkRecvFinishedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{20} +} + +func (m 
*MarkRecvFinishedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MarkRecvFinishedResponse.Unmarshal(m, b) +} +func (m *MarkRecvFinishedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MarkRecvFinishedResponse.Marshal(b, m, deterministic) +} +func (m *MarkRecvFinishedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MarkRecvFinishedResponse.Merge(m, src) +} +func (m *MarkRecvFinishedResponse) XXX_Size() int { + return xxx_messageInfo_MarkRecvFinishedResponse.Size(m) +} +func (m *MarkRecvFinishedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MarkRecvFinishedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MarkRecvFinishedResponse proto.InternalMessageInfo + +// Out-of-band request to begin or end logging, or +// to retrieve logs for particular steps. +type LoggingRequest struct { + // If true, RPC logging will be enabled. + EnableRpcLogging bool `protobuf:"varint,1,opt,name=enable_rpc_logging,json=enableRpcLogging,proto3" json:"enable_rpc_logging,omitempty"` + // If true, RPC logging will be disabled. + DisableRpcLogging bool `protobuf:"varint,4,opt,name=disable_rpc_logging,json=disableRpcLogging,proto3" json:"disable_rpc_logging,omitempty"` + // If true, discard any saved logging data (for all steps). + Clear bool `protobuf:"varint,2,opt,name=clear,proto3" json:"clear,omitempty"` + // When set, requests all saved log data pertaining to the step. + // Any log data retrieved is eliminated from the store and cannot be + // retrieved again. 
+ FetchStepId []int64 `protobuf:"varint,3,rep,packed,name=fetch_step_id,json=fetchStepId,proto3" json:"fetch_step_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoggingRequest) Reset() { *m = LoggingRequest{} } +func (m *LoggingRequest) String() string { return proto.CompactTextString(m) } +func (*LoggingRequest) ProtoMessage() {} +func (*LoggingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{21} +} + +func (m *LoggingRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoggingRequest.Unmarshal(m, b) +} +func (m *LoggingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoggingRequest.Marshal(b, m, deterministic) +} +func (m *LoggingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoggingRequest.Merge(m, src) +} +func (m *LoggingRequest) XXX_Size() int { + return xxx_messageInfo_LoggingRequest.Size(m) +} +func (m *LoggingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LoggingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LoggingRequest proto.InternalMessageInfo + +func (m *LoggingRequest) GetEnableRpcLogging() bool { + if m != nil { + return m.EnableRpcLogging + } + return false +} + +func (m *LoggingRequest) GetDisableRpcLogging() bool { + if m != nil { + return m.DisableRpcLogging + } + return false +} + +func (m *LoggingRequest) GetClear() bool { + if m != nil { + return m.Clear + } + return false +} + +func (m *LoggingRequest) GetFetchStepId() []int64 { + if m != nil { + return m.FetchStepId + } + return nil +} + +type LabeledStepStats struct { + StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"` + StepStats *framework.StepStats `protobuf:"bytes,2,opt,name=step_stats,json=stepStats,proto3" json:"step_stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache 
int32 `json:"-"` +} + +func (m *LabeledStepStats) Reset() { *m = LabeledStepStats{} } +func (m *LabeledStepStats) String() string { return proto.CompactTextString(m) } +func (*LabeledStepStats) ProtoMessage() {} +func (*LabeledStepStats) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{22} +} + +func (m *LabeledStepStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabeledStepStats.Unmarshal(m, b) +} +func (m *LabeledStepStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabeledStepStats.Marshal(b, m, deterministic) +} +func (m *LabeledStepStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabeledStepStats.Merge(m, src) +} +func (m *LabeledStepStats) XXX_Size() int { + return xxx_messageInfo_LabeledStepStats.Size(m) +} +func (m *LabeledStepStats) XXX_DiscardUnknown() { + xxx_messageInfo_LabeledStepStats.DiscardUnknown(m) +} + +var xxx_messageInfo_LabeledStepStats proto.InternalMessageInfo + +func (m *LabeledStepStats) GetStepId() int64 { + if m != nil { + return m.StepId + } + return 0 +} + +func (m *LabeledStepStats) GetStepStats() *framework.StepStats { + if m != nil { + return m.StepStats + } + return nil +} + +type LoggingResponse struct { + Step []*LabeledStepStats `protobuf:"bytes,1,rep,name=step,proto3" json:"step,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoggingResponse) Reset() { *m = LoggingResponse{} } +func (m *LoggingResponse) String() string { return proto.CompactTextString(m) } +func (*LoggingResponse) ProtoMessage() {} +func (*LoggingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{23} +} + +func (m *LoggingResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoggingResponse.Unmarshal(m, b) +} +func (m *LoggingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_LoggingResponse.Marshal(b, m, deterministic) +} +func (m *LoggingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoggingResponse.Merge(m, src) +} +func (m *LoggingResponse) XXX_Size() int { + return xxx_messageInfo_LoggingResponse.Size(m) +} +func (m *LoggingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LoggingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LoggingResponse proto.InternalMessageInfo + +func (m *LoggingResponse) GetStep() []*LabeledStepStats { + if m != nil { + return m.Step + } + return nil +} + +type TraceOpts struct { + // Length of the trace to be taken, in seconds. + Duration float64 `protobuf:"fixed64,1,opt,name=duration,proto3" json:"duration,omitempty"` + // If true, capture step profile locally in each worker. Currently + // unimplemented. + UseStepProfiler bool `protobuf:"varint,2,opt,name=use_step_profiler,json=useStepProfiler,proto3" json:"use_step_profiler,omitempty"` + // If true, capture kernel events from each worker. + UseKernelProfiler bool `protobuf:"varint,3,opt,name=use_kernel_profiler,json=useKernelProfiler,proto3" json:"use_kernel_profiler,omitempty"` + // If true, capture extended profiling events from TensorFlow process. + UseExtendedProfiler bool `protobuf:"varint,4,opt,name=use_extended_profiler,json=useExtendedProfiler,proto3" json:"use_extended_profiler,omitempty"` + // If true, capture GPU profiling events locally on each + // machine. Currently unimplemented. + UseGpuProfiler bool `protobuf:"varint,5,opt,name=use_gpu_profiler,json=useGpuProfiler,proto3" json:"use_gpu_profiler,omitempty"` + // If true, collect sampled profile events. Currently unimplemented. 
+ UseSampleProfiler bool `protobuf:"varint,6,opt,name=use_sample_profiler,json=useSampleProfiler,proto3" json:"use_sample_profiler,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceOpts) Reset() { *m = TraceOpts{} } +func (m *TraceOpts) String() string { return proto.CompactTextString(m) } +func (*TraceOpts) ProtoMessage() {} +func (*TraceOpts) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{24} +} + +func (m *TraceOpts) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceOpts.Unmarshal(m, b) +} +func (m *TraceOpts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceOpts.Marshal(b, m, deterministic) +} +func (m *TraceOpts) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceOpts.Merge(m, src) +} +func (m *TraceOpts) XXX_Size() int { + return xxx_messageInfo_TraceOpts.Size(m) +} +func (m *TraceOpts) XXX_DiscardUnknown() { + xxx_messageInfo_TraceOpts.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceOpts proto.InternalMessageInfo + +func (m *TraceOpts) GetDuration() float64 { + if m != nil { + return m.Duration + } + return 0 +} + +func (m *TraceOpts) GetUseStepProfiler() bool { + if m != nil { + return m.UseStepProfiler + } + return false +} + +func (m *TraceOpts) GetUseKernelProfiler() bool { + if m != nil { + return m.UseKernelProfiler + } + return false +} + +func (m *TraceOpts) GetUseExtendedProfiler() bool { + if m != nil { + return m.UseExtendedProfiler + } + return false +} + +func (m *TraceOpts) GetUseGpuProfiler() bool { + if m != nil { + return m.UseGpuProfiler + } + return false +} + +func (m *TraceOpts) GetUseSampleProfiler() bool { + if m != nil { + return m.UseSampleProfiler + } + return false +} + +// Out-of-band request to configure distributed tracing. 
+type TracingRequest struct { + Options *TraceOpts `protobuf:"bytes,1,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TracingRequest) Reset() { *m = TracingRequest{} } +func (m *TracingRequest) String() string { return proto.CompactTextString(m) } +func (*TracingRequest) ProtoMessage() {} +func (*TracingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{25} +} + +func (m *TracingRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TracingRequest.Unmarshal(m, b) +} +func (m *TracingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TracingRequest.Marshal(b, m, deterministic) +} +func (m *TracingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TracingRequest.Merge(m, src) +} +func (m *TracingRequest) XXX_Size() int { + return xxx_messageInfo_TracingRequest.Size(m) +} +func (m *TracingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TracingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TracingRequest proto.InternalMessageInfo + +func (m *TracingRequest) GetOptions() *TraceOpts { + if m != nil { + return m.Options + } + return nil +} + +type TracingResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TracingResponse) Reset() { *m = TracingResponse{} } +func (m *TracingResponse) String() string { return proto.CompactTextString(m) } +func (*TracingResponse) ProtoMessage() {} +func (*TracingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{26} +} + +func (m *TracingResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TracingResponse.Unmarshal(m, b) +} +func (m *TracingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TracingResponse.Marshal(b, m, 
deterministic) +} +func (m *TracingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TracingResponse.Merge(m, src) +} +func (m *TracingResponse) XXX_Size() int { + return xxx_messageInfo_TracingResponse.Size(m) +} +func (m *TracingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TracingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TracingResponse proto.InternalMessageInfo + +type RecvBufRequest struct { + // Used at server side to find the correct BufRendezvous. + StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"` + // Arbitrary string identifying a BufRendezvous entry. + BufRendezvousKey string `protobuf:"bytes,2,opt,name=buf_rendezvous_key,json=bufRendezvousKey,proto3" json:"buf_rendezvous_key,omitempty"` + // Size of value expected, must agree with BufRendezvous entry. + NumBytes int64 `protobuf:"varint,3,opt,name=num_bytes,json=numBytes,proto3" json:"num_bytes,omitempty"` + // When RDMA is in use, address of destination field on client. + BufPtr uint64 `protobuf:"fixed64,4,opt,name=buf_ptr,json=bufPtr,proto3" json:"buf_ptr,omitempty"` + // Optional information on client-side device locality. + ClientLocality *framework.DeviceLocality `protobuf:"bytes,5,opt,name=client_locality,json=clientLocality,proto3" json:"client_locality,omitempty"` + // Optional information on server-side device locality. + ServerLocality *framework.DeviceLocality `protobuf:"bytes,6,opt,name=server_locality,json=serverLocality,proto3" json:"server_locality,omitempty"` + // Optional, implementation-specific data. + TransportOptions *any.Any `protobuf:"bytes,7,opt,name=transport_options,json=transportOptions,proto3" json:"transport_options,omitempty"` + // For annotating timeline and device incarnation check. + SrcDevice string `protobuf:"bytes,8,opt,name=src_device,json=srcDevice,proto3" json:"src_device,omitempty"` + // Optional, for annotating the timeline. 
+ DstDevice string `protobuf:"bytes,9,opt,name=dst_device,json=dstDevice,proto3" json:"dst_device,omitempty"` + // Depending on the RPC system in use, it may be necessary to set this + // id to detect resends of RPCs where the server is not aware that + // the prior RPC failed. + RequestId int64 `protobuf:"varint,10,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Incarnation number of the source device, used to detect worker failures. + SrcIncarnation uint64 `protobuf:"varint,11,opt,name=src_incarnation,json=srcIncarnation,proto3" json:"src_incarnation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecvBufRequest) Reset() { *m = RecvBufRequest{} } +func (m *RecvBufRequest) String() string { return proto.CompactTextString(m) } +func (*RecvBufRequest) ProtoMessage() {} +func (*RecvBufRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{27} +} + +func (m *RecvBufRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecvBufRequest.Unmarshal(m, b) +} +func (m *RecvBufRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecvBufRequest.Marshal(b, m, deterministic) +} +func (m *RecvBufRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecvBufRequest.Merge(m, src) +} +func (m *RecvBufRequest) XXX_Size() int { + return xxx_messageInfo_RecvBufRequest.Size(m) +} +func (m *RecvBufRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RecvBufRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RecvBufRequest proto.InternalMessageInfo + +func (m *RecvBufRequest) GetStepId() int64 { + if m != nil { + return m.StepId + } + return 0 +} + +func (m *RecvBufRequest) GetBufRendezvousKey() string { + if m != nil { + return m.BufRendezvousKey + } + return "" +} + +func (m *RecvBufRequest) GetNumBytes() int64 { + if m != nil { + return m.NumBytes + } + return 0 +} + +func (m 
*RecvBufRequest) GetBufPtr() uint64 { + if m != nil { + return m.BufPtr + } + return 0 +} + +func (m *RecvBufRequest) GetClientLocality() *framework.DeviceLocality { + if m != nil { + return m.ClientLocality + } + return nil +} + +func (m *RecvBufRequest) GetServerLocality() *framework.DeviceLocality { + if m != nil { + return m.ServerLocality + } + return nil +} + +func (m *RecvBufRequest) GetTransportOptions() *any.Any { + if m != nil { + return m.TransportOptions + } + return nil +} + +func (m *RecvBufRequest) GetSrcDevice() string { + if m != nil { + return m.SrcDevice + } + return "" +} + +func (m *RecvBufRequest) GetDstDevice() string { + if m != nil { + return m.DstDevice + } + return "" +} + +func (m *RecvBufRequest) GetRequestId() int64 { + if m != nil { + return m.RequestId + } + return 0 +} + +func (m *RecvBufRequest) GetSrcIncarnation() uint64 { + if m != nil { + return m.SrcIncarnation + } + return 0 +} + +type RecvBufResponse struct { + BufPtr uint64 `protobuf:"fixed64,1,opt,name=buf_ptr,json=bufPtr,proto3" json:"buf_ptr,omitempty"` + NumBytes int64 `protobuf:"varint,2,opt,name=num_bytes,json=numBytes,proto3" json:"num_bytes,omitempty"` + IsDead bool `protobuf:"varint,3,opt,name=is_dead,json=isDead,proto3" json:"is_dead,omitempty"` + // Optional, implementation-specific data. + TransportOptions *any.Any `protobuf:"bytes,4,opt,name=transport_options,json=transportOptions,proto3" json:"transport_options,omitempty"` + // Optional, for timeline. + SendStartMicros int64 `protobuf:"varint,5,opt,name=send_start_micros,json=sendStartMicros,proto3" json:"send_start_micros,omitempty"` + // Whether the receiver should send a MarkRecvFinishedRequest to the sender + // to ack the message. 
+ RequireAck bool `protobuf:"varint,6,opt,name=require_ack,json=requireAck,proto3" json:"require_ack,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecvBufResponse) Reset() { *m = RecvBufResponse{} } +func (m *RecvBufResponse) String() string { return proto.CompactTextString(m) } +func (*RecvBufResponse) ProtoMessage() {} +func (*RecvBufResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{28} +} + +func (m *RecvBufResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecvBufResponse.Unmarshal(m, b) +} +func (m *RecvBufResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecvBufResponse.Marshal(b, m, deterministic) +} +func (m *RecvBufResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecvBufResponse.Merge(m, src) +} +func (m *RecvBufResponse) XXX_Size() int { + return xxx_messageInfo_RecvBufResponse.Size(m) +} +func (m *RecvBufResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RecvBufResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RecvBufResponse proto.InternalMessageInfo + +func (m *RecvBufResponse) GetBufPtr() uint64 { + if m != nil { + return m.BufPtr + } + return 0 +} + +func (m *RecvBufResponse) GetNumBytes() int64 { + if m != nil { + return m.NumBytes + } + return 0 +} + +func (m *RecvBufResponse) GetIsDead() bool { + if m != nil { + return m.IsDead + } + return false +} + +func (m *RecvBufResponse) GetTransportOptions() *any.Any { + if m != nil { + return m.TransportOptions + } + return nil +} + +func (m *RecvBufResponse) GetSendStartMicros() int64 { + if m != nil { + return m.SendStartMicros + } + return 0 +} + +func (m *RecvBufResponse) GetRequireAck() bool { + if m != nil { + return m.RequireAck + } + return false +} + +// Supplies one or more device names as members of the group identified by +// group_key. 
Service will respond when all group_size devices become known. +// All devices in group must have same type. +type CompleteGroupRequest struct { + GroupKey int32 `protobuf:"varint,1,opt,name=group_key,json=groupKey,proto3" json:"group_key,omitempty"` + GroupSize int32 `protobuf:"varint,2,opt,name=group_size,json=groupSize,proto3" json:"group_size,omitempty"` + DeviceType string `protobuf:"bytes,3,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"` + DeviceName []string `protobuf:"bytes,4,rep,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"` + CollectiveType int32 `protobuf:"varint,5,opt,name=collective_type,json=collectiveType,proto3" json:"collective_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompleteGroupRequest) Reset() { *m = CompleteGroupRequest{} } +func (m *CompleteGroupRequest) String() string { return proto.CompactTextString(m) } +func (*CompleteGroupRequest) ProtoMessage() {} +func (*CompleteGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{29} +} + +func (m *CompleteGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompleteGroupRequest.Unmarshal(m, b) +} +func (m *CompleteGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompleteGroupRequest.Marshal(b, m, deterministic) +} +func (m *CompleteGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompleteGroupRequest.Merge(m, src) +} +func (m *CompleteGroupRequest) XXX_Size() int { + return xxx_messageInfo_CompleteGroupRequest.Size(m) +} +func (m *CompleteGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CompleteGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CompleteGroupRequest proto.InternalMessageInfo + +func (m *CompleteGroupRequest) GetGroupKey() int32 { + if m != nil { + return m.GroupKey + } + return 0 +} + +func (m 
*CompleteGroupRequest) GetGroupSize() int32 { + if m != nil { + return m.GroupSize + } + return 0 +} + +func (m *CompleteGroupRequest) GetDeviceType() string { + if m != nil { + return m.DeviceType + } + return "" +} + +func (m *CompleteGroupRequest) GetDeviceName() []string { + if m != nil { + return m.DeviceName + } + return nil +} + +func (m *CompleteGroupRequest) GetCollectiveType() int32 { + if m != nil { + return m.CollectiveType + } + return 0 +} + +// Gives the complete membership of the group identified by group_key. +type CompleteGroupResponse struct { + GroupKey int32 `protobuf:"varint,1,opt,name=group_key,json=groupKey,proto3" json:"group_key,omitempty"` + GroupSize int32 `protobuf:"varint,2,opt,name=group_size,json=groupSize,proto3" json:"group_size,omitempty"` + DeviceType string `protobuf:"bytes,3,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"` + NumTasks int32 `protobuf:"varint,4,opt,name=num_tasks,json=numTasks,proto3" json:"num_tasks,omitempty"` + DeviceName []string `protobuf:"bytes,5,rep,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"` + TaskName []string `protobuf:"bytes,6,rep,name=task_name,json=taskName,proto3" json:"task_name,omitempty"` + CommunicatorKey []byte `protobuf:"bytes,7,opt,name=communicator_key,json=communicatorKey,proto3" json:"communicator_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompleteGroupResponse) Reset() { *m = CompleteGroupResponse{} } +func (m *CompleteGroupResponse) String() string { return proto.CompactTextString(m) } +func (*CompleteGroupResponse) ProtoMessage() {} +func (*CompleteGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{30} +} + +func (m *CompleteGroupResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompleteGroupResponse.Unmarshal(m, b) +} +func (m *CompleteGroupResponse) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompleteGroupResponse.Marshal(b, m, deterministic) +} +func (m *CompleteGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompleteGroupResponse.Merge(m, src) +} +func (m *CompleteGroupResponse) XXX_Size() int { + return xxx_messageInfo_CompleteGroupResponse.Size(m) +} +func (m *CompleteGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CompleteGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CompleteGroupResponse proto.InternalMessageInfo + +func (m *CompleteGroupResponse) GetGroupKey() int32 { + if m != nil { + return m.GroupKey + } + return 0 +} + +func (m *CompleteGroupResponse) GetGroupSize() int32 { + if m != nil { + return m.GroupSize + } + return 0 +} + +func (m *CompleteGroupResponse) GetDeviceType() string { + if m != nil { + return m.DeviceType + } + return "" +} + +func (m *CompleteGroupResponse) GetNumTasks() int32 { + if m != nil { + return m.NumTasks + } + return 0 +} + +func (m *CompleteGroupResponse) GetDeviceName() []string { + if m != nil { + return m.DeviceName + } + return nil +} + +func (m *CompleteGroupResponse) GetTaskName() []string { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *CompleteGroupResponse) GetCommunicatorKey() []byte { + if m != nil { + return m.CommunicatorKey + } + return nil +} + +// Supplies data about one collective op belonging to the instance identified +// by instance_key. Service will respond when all group_size ops have +// become known. Most of the data being sent is for correctness checking, +// to ensure that all ops in the instance share common attributes. 
+type CompleteInstanceRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type int32 `protobuf:"varint,2,opt,name=type,proto3" json:"type,omitempty"` + DataType framework.DataType `protobuf:"varint,3,opt,name=data_type,json=dataType,proto3,enum=tensorflow.DataType" json:"data_type,omitempty"` + Shape *framework.TensorShapeProto `protobuf:"bytes,4,opt,name=shape,proto3" json:"shape,omitempty"` + GroupKey int32 `protobuf:"varint,5,opt,name=group_key,json=groupKey,proto3" json:"group_key,omitempty"` + GroupSize int32 `protobuf:"varint,6,opt,name=group_size,json=groupSize,proto3" json:"group_size,omitempty"` + InstanceKey int32 `protobuf:"varint,7,opt,name=instance_key,json=instanceKey,proto3" json:"instance_key,omitempty"` + DeviceType string `protobuf:"bytes,8,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"` + SubdivOffset []int32 `protobuf:"varint,9,rep,packed,name=subdiv_offset,json=subdivOffset,proto3" json:"subdiv_offset,omitempty"` + Device string `protobuf:"bytes,10,opt,name=device,proto3" json:"device,omitempty"` + IsSource bool `protobuf:"varint,11,opt,name=is_source,json=isSource,proto3" json:"is_source,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompleteInstanceRequest) Reset() { *m = CompleteInstanceRequest{} } +func (m *CompleteInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*CompleteInstanceRequest) ProtoMessage() {} +func (*CompleteInstanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{31} +} + +func (m *CompleteInstanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompleteInstanceRequest.Unmarshal(m, b) +} +func (m *CompleteInstanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompleteInstanceRequest.Marshal(b, m, deterministic) +} +func (m 
*CompleteInstanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompleteInstanceRequest.Merge(m, src) +} +func (m *CompleteInstanceRequest) XXX_Size() int { + return xxx_messageInfo_CompleteInstanceRequest.Size(m) +} +func (m *CompleteInstanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CompleteInstanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CompleteInstanceRequest proto.InternalMessageInfo + +func (m *CompleteInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CompleteInstanceRequest) GetType() int32 { + if m != nil { + return m.Type + } + return 0 +} + +func (m *CompleteInstanceRequest) GetDataType() framework.DataType { + if m != nil { + return m.DataType + } + return framework.DataType_DT_INVALID +} + +func (m *CompleteInstanceRequest) GetShape() *framework.TensorShapeProto { + if m != nil { + return m.Shape + } + return nil +} + +func (m *CompleteInstanceRequest) GetGroupKey() int32 { + if m != nil { + return m.GroupKey + } + return 0 +} + +func (m *CompleteInstanceRequest) GetGroupSize() int32 { + if m != nil { + return m.GroupSize + } + return 0 +} + +func (m *CompleteInstanceRequest) GetInstanceKey() int32 { + if m != nil { + return m.InstanceKey + } + return 0 +} + +func (m *CompleteInstanceRequest) GetDeviceType() string { + if m != nil { + return m.DeviceType + } + return "" +} + +func (m *CompleteInstanceRequest) GetSubdivOffset() []int32 { + if m != nil { + return m.SubdivOffset + } + return nil +} + +func (m *CompleteInstanceRequest) GetDevice() string { + if m != nil { + return m.Device + } + return "" +} + +func (m *CompleteInstanceRequest) GetIsSource() bool { + if m != nil { + return m.IsSource + } + return false +} + +// Confirms that every op in the instance has consistently declared itself. +// Also gives the source_rank in case of broadcast. 
+type CompleteInstanceResponse struct { + InstanceKey int32 `protobuf:"varint,1,opt,name=instance_key,json=instanceKey,proto3" json:"instance_key,omitempty"` + SourceRank int32 `protobuf:"varint,2,opt,name=source_rank,json=sourceRank,proto3" json:"source_rank,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompleteInstanceResponse) Reset() { *m = CompleteInstanceResponse{} } +func (m *CompleteInstanceResponse) String() string { return proto.CompactTextString(m) } +func (*CompleteInstanceResponse) ProtoMessage() {} +func (*CompleteInstanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{32} +} + +func (m *CompleteInstanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompleteInstanceResponse.Unmarshal(m, b) +} +func (m *CompleteInstanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompleteInstanceResponse.Marshal(b, m, deterministic) +} +func (m *CompleteInstanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompleteInstanceResponse.Merge(m, src) +} +func (m *CompleteInstanceResponse) XXX_Size() int { + return xxx_messageInfo_CompleteInstanceResponse.Size(m) +} +func (m *CompleteInstanceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CompleteInstanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CompleteInstanceResponse proto.InternalMessageInfo + +func (m *CompleteInstanceResponse) GetInstanceKey() int32 { + if m != nil { + return m.InstanceKey + } + return 0 +} + +func (m *CompleteInstanceResponse) GetSourceRank() int32 { + if m != nil { + return m.SourceRank + } + return 0 +} + +// Request for next agreed-upon step_id for the specified graph_keys. +// This is used to enable multiple graphs containing nodes from +// a common collective instance to coordinate using the same step_ids. 
+type GetStepSequenceRequest struct { + GraphKey []int64 `protobuf:"varint,1,rep,packed,name=graph_key,json=graphKey,proto3" json:"graph_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetStepSequenceRequest) Reset() { *m = GetStepSequenceRequest{} } +func (m *GetStepSequenceRequest) String() string { return proto.CompactTextString(m) } +func (*GetStepSequenceRequest) ProtoMessage() {} +func (*GetStepSequenceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{33} +} + +func (m *GetStepSequenceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetStepSequenceRequest.Unmarshal(m, b) +} +func (m *GetStepSequenceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetStepSequenceRequest.Marshal(b, m, deterministic) +} +func (m *GetStepSequenceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetStepSequenceRequest.Merge(m, src) +} +func (m *GetStepSequenceRequest) XXX_Size() int { + return xxx_messageInfo_GetStepSequenceRequest.Size(m) +} +func (m *GetStepSequenceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetStepSequenceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetStepSequenceRequest proto.InternalMessageInfo + +func (m *GetStepSequenceRequest) GetGraphKey() []int64 { + if m != nil { + return m.GraphKey + } + return nil +} + +type StepSequence struct { + GraphKey int64 `protobuf:"varint,1,opt,name=graph_key,json=graphKey,proto3" json:"graph_key,omitempty"` + NextStepId int64 `protobuf:"varint,2,opt,name=next_step_id,json=nextStepId,proto3" json:"next_step_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StepSequence) Reset() { *m = StepSequence{} } +func (m *StepSequence) String() string { return proto.CompactTextString(m) } +func (*StepSequence) ProtoMessage() {} 
+func (*StepSequence) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{34} +} + +func (m *StepSequence) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StepSequence.Unmarshal(m, b) +} +func (m *StepSequence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StepSequence.Marshal(b, m, deterministic) +} +func (m *StepSequence) XXX_Merge(src proto.Message) { + xxx_messageInfo_StepSequence.Merge(m, src) +} +func (m *StepSequence) XXX_Size() int { + return xxx_messageInfo_StepSequence.Size(m) +} +func (m *StepSequence) XXX_DiscardUnknown() { + xxx_messageInfo_StepSequence.DiscardUnknown(m) +} + +var xxx_messageInfo_StepSequence proto.InternalMessageInfo + +func (m *StepSequence) GetGraphKey() int64 { + if m != nil { + return m.GraphKey + } + return 0 +} + +func (m *StepSequence) GetNextStepId() int64 { + if m != nil { + return m.NextStepId + } + return 0 +} + +// Next valid step_ids for one or more graph_keys. +type GetStepSequenceResponse struct { + StepSequence []*StepSequence `protobuf:"bytes,1,rep,name=step_sequence,json=stepSequence,proto3" json:"step_sequence,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetStepSequenceResponse) Reset() { *m = GetStepSequenceResponse{} } +func (m *GetStepSequenceResponse) String() string { return proto.CompactTextString(m) } +func (*GetStepSequenceResponse) ProtoMessage() {} +func (*GetStepSequenceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f24b6dc95cbd078c, []int{35} +} + +func (m *GetStepSequenceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetStepSequenceResponse.Unmarshal(m, b) +} +func (m *GetStepSequenceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetStepSequenceResponse.Marshal(b, m, deterministic) +} +func (m *GetStepSequenceResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_GetStepSequenceResponse.Merge(m, src) +} +func (m *GetStepSequenceResponse) XXX_Size() int { + return xxx_messageInfo_GetStepSequenceResponse.Size(m) +} +func (m *GetStepSequenceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetStepSequenceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetStepSequenceResponse proto.InternalMessageInfo + +func (m *GetStepSequenceResponse) GetStepSequence() []*StepSequence { + if m != nil { + return m.StepSequence + } + return nil +} + +func init() { + proto.RegisterType((*GetStatusRequest)(nil), "tensorflow.GetStatusRequest") + proto.RegisterType((*GetStatusResponse)(nil), "tensorflow.GetStatusResponse") + proto.RegisterType((*CreateWorkerSessionRequest)(nil), "tensorflow.CreateWorkerSessionRequest") + proto.RegisterType((*CreateWorkerSessionResponse)(nil), "tensorflow.CreateWorkerSessionResponse") + proto.RegisterType((*DeleteWorkerSessionRequest)(nil), "tensorflow.DeleteWorkerSessionRequest") + proto.RegisterType((*DeleteWorkerSessionResponse)(nil), "tensorflow.DeleteWorkerSessionResponse") + proto.RegisterType((*RegisterGraphRequest)(nil), "tensorflow.RegisterGraphRequest") + proto.RegisterType((*RegisterGraphResponse)(nil), "tensorflow.RegisterGraphResponse") + proto.RegisterType((*DeregisterGraphRequest)(nil), "tensorflow.DeregisterGraphRequest") + proto.RegisterType((*DeregisterGraphResponse)(nil), "tensorflow.DeregisterGraphResponse") + proto.RegisterType((*CleanupAllRequest)(nil), "tensorflow.CleanupAllRequest") + proto.RegisterType((*CleanupAllResponse)(nil), "tensorflow.CleanupAllResponse") + proto.RegisterType((*ExecutorOpts)(nil), "tensorflow.ExecutorOpts") + proto.RegisterType((*RunGraphRequest)(nil), "tensorflow.RunGraphRequest") + proto.RegisterType((*RunGraphResponse)(nil), "tensorflow.RunGraphResponse") + proto.RegisterType((*CleanupGraphRequest)(nil), "tensorflow.CleanupGraphRequest") + proto.RegisterType((*CleanupGraphResponse)(nil), "tensorflow.CleanupGraphResponse") + 
proto.RegisterType((*RecvTensorRequest)(nil), "tensorflow.RecvTensorRequest") + proto.RegisterType((*RecvTensorResponse)(nil), "tensorflow.RecvTensorResponse") + proto.RegisterType((*MarkRecvFinishedRequest)(nil), "tensorflow.MarkRecvFinishedRequest") + proto.RegisterType((*MarkRecvFinishedResponse)(nil), "tensorflow.MarkRecvFinishedResponse") + proto.RegisterType((*LoggingRequest)(nil), "tensorflow.LoggingRequest") + proto.RegisterType((*LabeledStepStats)(nil), "tensorflow.LabeledStepStats") + proto.RegisterType((*LoggingResponse)(nil), "tensorflow.LoggingResponse") + proto.RegisterType((*TraceOpts)(nil), "tensorflow.TraceOpts") + proto.RegisterType((*TracingRequest)(nil), "tensorflow.TracingRequest") + proto.RegisterType((*TracingResponse)(nil), "tensorflow.TracingResponse") + proto.RegisterType((*RecvBufRequest)(nil), "tensorflow.RecvBufRequest") + proto.RegisterType((*RecvBufResponse)(nil), "tensorflow.RecvBufResponse") + proto.RegisterType((*CompleteGroupRequest)(nil), "tensorflow.CompleteGroupRequest") + proto.RegisterType((*CompleteGroupResponse)(nil), "tensorflow.CompleteGroupResponse") + proto.RegisterType((*CompleteInstanceRequest)(nil), "tensorflow.CompleteInstanceRequest") + proto.RegisterType((*CompleteInstanceResponse)(nil), "tensorflow.CompleteInstanceResponse") + proto.RegisterType((*GetStepSequenceRequest)(nil), "tensorflow.GetStepSequenceRequest") + proto.RegisterType((*StepSequence)(nil), "tensorflow.StepSequence") + proto.RegisterType((*GetStepSequenceResponse)(nil), "tensorflow.GetStepSequenceResponse") +} + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/worker.proto", fileDescriptor_f24b6dc95cbd078c) +} + +var fileDescriptor_f24b6dc95cbd078c = []byte{ + // 2214 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x4f, 0x73, 0x1b, 0x4b, + 0x11, 0xaf, 0xb5, 0x2c, 0x59, 0x6a, 0x3b, 0xb2, 0xbc, 0xb1, 0x63, 0xc5, 0x49, 0xc0, 0x6f, 0x21, + 0x60, 0x1e, 0xc1, 0x4e, 0x0c, 0x8f, 
0x47, 0x51, 0xa4, 0xc0, 0x7f, 0xf2, 0x82, 0x5f, 0x12, 0xe2, + 0x1a, 0x9b, 0xe2, 0x15, 0x07, 0xb6, 0x46, 0xbb, 0x23, 0x79, 0x4b, 0xab, 0x9d, 0x65, 0x66, 0xd6, + 0x89, 0x72, 0xe4, 0xc2, 0x91, 0xe2, 0x33, 0x70, 0xe1, 0x4b, 0xf0, 0x35, 0xb8, 0x41, 0x71, 0xe4, + 0x44, 0x71, 0xa4, 0x38, 0x51, 0x3d, 0x33, 0xbb, 0x5a, 0x69, 0xe5, 0xe7, 0x24, 0x2f, 0xdc, 0x76, + 0xba, 0x7f, 0xd3, 0xd3, 0xd3, 0xbf, 0xee, 0x9e, 0x99, 0x85, 0xfb, 0x8a, 0x25, 0x92, 0x8b, 0x7e, + 0xcc, 0x5f, 0xed, 0x05, 0x5c, 0xb0, 0xbd, 0x54, 0x70, 0xc5, 0x7b, 0x59, 0x7f, 0xef, 0x15, 0x17, + 0x43, 0x26, 0x76, 0xf5, 0xd8, 0x85, 0x09, 0x6c, 0xeb, 0xf6, 0x80, 0xf3, 0x41, 0x5c, 0x42, 0xd2, + 0x64, 0x6c, 0x60, 0x5b, 0x1f, 0xcf, 0x5a, 0xeb, 0x0b, 0x3a, 0x62, 0x68, 0x69, 0x2f, 0xe0, 0x52, + 0xf9, 0x03, 0x41, 0xd3, 0x0b, 0x8b, 0x7d, 0x74, 0x35, 0x36, 0x64, 0x97, 0x51, 0xc0, 0x7c, 0xaa, + 0x94, 0x88, 0x7a, 0x99, 0x62, 0xd2, 0x4e, 0xb9, 0x7f, 0xf5, 0x94, 0xb2, 0xe5, 0x2f, 0xf1, 0x42, + 0x2a, 0x96, 0xfa, 0x52, 0x51, 0x95, 0x9b, 0xfc, 0xd6, 0xd5, 0x58, 0xa3, 0xb1, 0xb8, 0x07, 0xd7, + 0xe1, 0x7c, 0x79, 0x41, 0x53, 0x76, 0xbd, 0xa3, 0x6a, 0x9c, 0x16, 0xfb, 0xa9, 0x38, 0x1a, 0x47, + 0x3d, 0xf3, 0xc1, 0x84, 0xe0, 0xc2, 0x0f, 0x78, 0x78, 0xf5, 0xde, 0x8b, 0xf0, 0x07, 0x3c, 0xe9, + 0x47, 0x03, 0x0b, 0xfb, 0xe6, 0x95, 0xb0, 0x90, 0xf5, 0xb2, 0x1c, 0xf5, 0xdd, 0x2b, 0x51, 0x09, + 0x1d, 0xb1, 0xd0, 0x9f, 0xda, 0xfa, 0xc3, 0x2b, 0xc1, 0x13, 0x85, 0x2f, 0x99, 0xb8, 0xcc, 0xb3, + 0xc5, 0x73, 0xa1, 0xf3, 0x94, 0xa9, 0x33, 0x45, 0x55, 0x26, 0x09, 0xfb, 0x6d, 0xc6, 0xa4, 0xf2, + 0x7e, 0x03, 0x6b, 0x25, 0x99, 0x4c, 0x79, 0x22, 0x99, 0x7b, 0x02, 0x6b, 0x15, 0xae, 0xbb, 0xce, + 0x76, 0x6d, 0x67, 0x79, 0xff, 0xee, 0xee, 0xc4, 0xfa, 0xee, 0xb1, 0x06, 0x1d, 0x14, 0x18, 0xd2, + 0x09, 0x67, 0x24, 0xde, 0xef, 0x17, 0x60, 0xeb, 0x48, 0x30, 0xaa, 0xd8, 0xaf, 0x74, 0xe2, 0x9e, + 0x31, 0x29, 0x23, 0x9e, 0xd8, 0xe5, 0xdd, 0xfb, 0xd0, 0x96, 0x46, 0xe2, 0x5f, 0xd0, 0x24, 0x8c, + 0x59, 0xd7, 0xd9, 0x76, 0x76, 0x5a, 0xe4, 0x86, 0x95, 0xfe, 0x5c, 0x0b, 
0xdd, 0x1f, 0x00, 0x98, + 0x9d, 0xf8, 0x21, 0xeb, 0x77, 0x17, 0xb6, 0x9d, 0x9d, 0xe5, 0xfd, 0x8d, 0xb2, 0x27, 0x67, 0x5a, + 0x7b, 0xcc, 0xfa, 0xa4, 0x25, 0xf3, 0x4f, 0x77, 0x1f, 0x36, 0x22, 0xc9, 0x63, 0xaa, 0x98, 0x9f, + 0x2f, 0x82, 0x39, 0xc6, 0xba, 0xb5, 0x6d, 0x67, 0xa7, 0x49, 0x6e, 0x5a, 0xa5, 0x75, 0x09, 0x63, + 0xc0, 0xdc, 0x2f, 0xe0, 0x76, 0x10, 0x67, 0x52, 0xe9, 0xa5, 0x66, 0x43, 0xb0, 0xf8, 0x16, 0x21, + 0xd8, 0xb4, 0xd3, 0x67, 0x15, 0xde, 0x3d, 0xb8, 0x33, 0x37, 0x10, 0x26, 0xe6, 0xde, 0x11, 0x6c, + 0x1d, 0xb3, 0x98, 0x7d, 0xa5, 0x38, 0xe1, 0x1a, 0x73, 0x8d, 0xd8, 0x35, 0xfe, 0x50, 0x83, 0x75, + 0xc2, 0x06, 0x11, 0xfa, 0xf7, 0x14, 0x2b, 0xf3, 0x1d, 0x69, 0xf8, 0x29, 0xdc, 0x0d, 0xf4, 0x16, + 0x7c, 0xd3, 0x85, 0x8a, 0xb0, 0x06, 0x34, 0x8e, 0x59, 0xd8, 0x6d, 0xe8, 0xb8, 0xde, 0x0e, 0xaa, + 0xdb, 0x3c, 0xd2, 0x00, 0xf7, 0x11, 0xb4, 0x74, 0x47, 0x28, 0xd1, 0xb8, 0x5e, 0x8e, 0xa6, 0x76, + 0x0a, 0x59, 0x6c, 0x0e, 0xec, 0x97, 0xfb, 0x00, 0x3a, 0x17, 0x54, 0xfa, 0x01, 0x4f, 0x94, 0xe0, + 0xb1, 0x8f, 0x30, 0xc3, 0xdf, 0xe1, 0x42, 0xd7, 0x21, 0xed, 0x0b, 0x2a, 0x8f, 0x8c, 0xea, 0xb3, + 0x98, 0xbf, 0x72, 0x1f, 0xc3, 0x0d, 0xb3, 0x00, 0x4f, 0x55, 0xc4, 0x13, 0xa4, 0x0c, 0x17, 0xe9, + 0x56, 0x16, 0x79, 0x69, 0xf4, 0x64, 0x65, 0x50, 0x1a, 0xe1, 0x74, 0x5d, 0x8f, 0xc5, 0xf4, 0x7a, + 0x75, 0xfa, 0x31, 0x02, 0x8a, 0xe9, 0x61, 0x69, 0xe4, 0x3e, 0x84, 0xf5, 0x80, 0xc7, 0x31, 0x0b, + 0x54, 0x74, 0xc9, 0x4c, 0x57, 0xf5, 0x87, 0x6c, 0xdc, 0x5d, 0xda, 0x76, 0x76, 0x6a, 0xc4, 0x9d, + 0xe8, 0xb4, 0x0b, 0xcf, 0xd8, 0xd8, 0xfb, 0x31, 0x6c, 0xcc, 0x10, 0x62, 0x4b, 0xf0, 0x23, 0x30, + 0x9e, 0x4d, 0xf3, 0xb1, 0xac, 0x65, 0x96, 0xec, 0x3f, 0x39, 0x70, 0xeb, 0x98, 0x89, 0xb7, 0xe3, + 0x73, 0xe1, 0x7d, 0xf8, 0xac, 0x5d, 0xc7, 0xe7, 0x5b, 0x78, 0x79, 0x1b, 0x36, 0x2b, 0x4e, 0xda, + 0x74, 0x7c, 0x04, 0x6b, 0x47, 0x31, 0xa3, 0x49, 0x96, 0x1e, 0xc4, 0x71, 0xee, 0xfa, 0x5d, 0x68, + 0x21, 0xd7, 0x34, 0x4a, 0x98, 0xd0, 0x3d, 0xa7, 0x45, 0x26, 0x02, 0x6f, 0x1d, 0xdc, 0xf2, 0x14, + 0x6b, 0xe8, 
0xef, 0x0e, 0xac, 0x3c, 0x79, 0xcd, 0x82, 0x4c, 0x71, 0xf1, 0x32, 0x55, 0x12, 0xfd, + 0x12, 0x2c, 0xe0, 0x22, 0xf4, 0xf1, 0x7c, 0x93, 0xda, 0xaf, 0x26, 0x59, 0x36, 0xb2, 0x23, 0x14, + 0xb9, 0xdf, 0x86, 0x55, 0x0b, 0x51, 0xd1, 0x88, 0xc5, 0x51, 0x92, 0xb7, 0x85, 0xb6, 0x11, 0x9f, + 0x5b, 0xa9, 0xfb, 0x43, 0xd8, 0xb4, 0xc0, 0x94, 0x0a, 0x15, 0x21, 0xd3, 0x86, 0x5a, 0x93, 0x5c, + 0x4d, 0xb2, 0x61, 0xd4, 0xa7, 0xb9, 0x56, 0x6f, 0x52, 0xba, 0x9f, 0x83, 0x27, 0x58, 0xca, 0x85, + 0xb2, 0x6d, 0xdb, 0xa7, 0x71, 0xcc, 0x03, 0xaa, 0x33, 0xc5, 0xcf, 0x52, 0x9e, 0xf8, 0x9c, 0x8f, + 0x74, 0x82, 0x35, 0xc9, 0xd7, 0x0c, 0xf2, 0x5c, 0x03, 0x0f, 0x26, 0xb8, 0x5f, 0xa6, 0x3c, 0x79, + 0xc9, 0x47, 0xde, 0x3f, 0x6b, 0xb0, 0x4a, 0xb2, 0xe4, 0x1a, 0x8e, 0x9b, 0xef, 0xc3, 0x31, 0x7c, + 0x75, 0x8e, 0xdd, 0x4d, 0x58, 0xd2, 0x27, 0x78, 0x14, 0xea, 0x3c, 0xab, 0x91, 0x06, 0x0e, 0x4f, + 0x42, 0xf7, 0x13, 0x68, 0xb1, 0xd7, 0x2c, 0xc0, 0x72, 0x9a, 0x5b, 0x4b, 0x65, 0xd2, 0x48, 0x13, + 0xa1, 0x9a, 0xbe, 0x87, 0xb0, 0x28, 0x59, 0x82, 0xf9, 0x57, 0xe9, 0xb7, 0xbf, 0xc0, 0x83, 0xd0, + 0xc4, 0xe9, 0x14, 0x0f, 0x35, 0xa2, 0x91, 0xee, 0x6d, 0x68, 0x0a, 0x16, 0x5c, 0xea, 0x6a, 0x5b, + 0xd4, 0x49, 0xb3, 0x84, 0xe3, 0x67, 0x6c, 0xec, 0xde, 0x03, 0x88, 0xa4, 0xe1, 0x8e, 0xc6, 0xb6, + 0x45, 0xb5, 0x22, 0x79, 0x6a, 0x04, 0xee, 0xf7, 0xe0, 0x66, 0x24, 0xfd, 0x98, 0x4a, 0x95, 0x63, + 0x7c, 0x91, 0x25, 0xba, 0x64, 0x9b, 0xa4, 0x13, 0xc9, 0xe7, 0x54, 0x2a, 0x8b, 0x25, 0x59, 0xe2, + 0xfe, 0x0c, 0xee, 0x49, 0xc5, 0x05, 0xf3, 0xf5, 0x55, 0x40, 0xfa, 0x51, 0xe2, 0x0b, 0x9b, 0x86, + 0x7e, 0x8f, 0x87, 0xe3, 0x6e, 0xcb, 0xc4, 0x53, 0x83, 0x9e, 0x68, 0xcc, 0x49, 0xd1, 0x80, 0x0f, + 0x79, 0xa8, 0xfd, 0x11, 0x86, 0x42, 0x8c, 0xd7, 0xb2, 0x8e, 0x57, 0xcb, 0x4a, 0x4e, 0x42, 0xef, + 0x6f, 0x0b, 0xd0, 0x99, 0x50, 0x6d, 0xbb, 0xc1, 0x43, 0x58, 0xc4, 0xed, 0xcc, 0x3b, 0x83, 0xab, + 0x01, 0x41, 0xa4, 0x3e, 0x31, 0x8b, 0x4b, 0xd5, 0xdc, 0x13, 0x53, 0xb1, 0x14, 0x8f, 0x3c, 0x49, + 0x5a, 0x32, 0xff, 0x74, 0x3f, 0x05, 0x98, 0x5c, 
0x08, 0x75, 0x3d, 0xcc, 0x10, 0x86, 0xb5, 0x53, + 0x34, 0xe9, 0x56, 0x90, 0x8f, 0xdc, 0xc7, 0xb0, 0x3a, 0x53, 0x1d, 0xf6, 0xb0, 0x9c, 0xdf, 0xde, + 0xdb, 0xe9, 0x54, 0xb1, 0xb8, 0x9f, 0xc2, 0xb2, 0xd4, 0x57, 0x10, 0x7d, 0xb7, 0xd2, 0x99, 0xd2, + 0xde, 0xbf, 0x55, 0x9e, 0xaa, 0xc3, 0xbd, 0x7b, 0xc4, 0x43, 0x46, 0xc0, 0x40, 0xf1, 0x1b, 0x3b, + 0xae, 0x9d, 0x68, 0xae, 0x66, 0x23, 0x26, 0x25, 0x1d, 0x30, 0x4d, 0x73, 0x8b, 0xb8, 0x46, 0xa7, + 0x69, 0x78, 0x61, 0x34, 0xde, 0x2e, 0xdc, 0xb4, 0x1d, 0x64, 0xaa, 0x9a, 0x4a, 0x29, 0xec, 0x94, + 0x53, 0xd8, 0xbb, 0x05, 0xeb, 0xd3, 0x78, 0xdb, 0x73, 0xfe, 0xba, 0x00, 0x6b, 0x84, 0x05, 0x97, + 0x26, 0xf4, 0xd7, 0x99, 0xc1, 0x6a, 0x15, 0x2c, 0x09, 0xd9, 0x9b, 0x4b, 0x9e, 0x49, 0x9d, 0xa6, + 0xb6, 0x23, 0x4f, 0xa4, 0x98, 0xac, 0x1b, 0xd0, 0x08, 0x47, 0xd4, 0xe7, 0x43, 0xdb, 0x8c, 0xea, + 0xe1, 0x88, 0xbe, 0x1c, 0xba, 0x47, 0xb0, 0x1a, 0xc4, 0x11, 0x4b, 0x94, 0x8f, 0xad, 0x21, 0x8e, + 0xd4, 0xd8, 0x1e, 0x6c, 0x5b, 0xd5, 0xbb, 0xc8, 0x73, 0x8b, 0x20, 0x6d, 0x33, 0x25, 0x1f, 0xa3, + 0x11, 0x7b, 0x89, 0x2a, 0x8c, 0xd4, 0xaf, 0x37, 0x62, 0xa6, 0x14, 0x46, 0x0e, 0x60, 0x4d, 0x09, + 0x9a, 0x48, 0xdd, 0xd8, 0xf2, 0x53, 0xb2, 0x61, 0x4f, 0x72, 0xf3, 0x02, 0xd9, 0xcd, 0x2f, 0xa2, + 0xbb, 0x07, 0xc9, 0x98, 0x74, 0x0a, 0x78, 0x7e, 0x4a, 0x4e, 0x17, 0xc0, 0xd2, 0x6c, 0x01, 0xfc, + 0xcb, 0x01, 0xb7, 0x1c, 0x58, 0x5b, 0x02, 0x7b, 0xd0, 0x30, 0x5e, 0xea, 0xc0, 0x2e, 0xef, 0x6f, + 0x96, 0x9d, 0x2e, 0xe7, 0xbf, 0x85, 0x21, 0x15, 0x91, 0xf4, 0x43, 0x46, 0x4d, 0x53, 0x6a, 0x92, + 0x46, 0x24, 0x8f, 0x19, 0x0d, 0xdd, 0x8f, 0x61, 0x0d, 0x7b, 0x06, 0x96, 0x86, 0x50, 0xfe, 0x28, + 0x0a, 0x04, 0x97, 0x3a, 0xdc, 0x35, 0xb2, 0x8a, 0x8a, 0x33, 0x94, 0xbf, 0xd0, 0xe2, 0xf9, 0xdb, + 0x5d, 0x7c, 0xa7, 0xed, 0x7e, 0x1d, 0x96, 0x71, 0x73, 0x91, 0x60, 0x3e, 0x0d, 0x86, 0xb6, 0xe1, + 0x83, 0x15, 0x1d, 0x04, 0x43, 0xef, 0x47, 0xb0, 0xf9, 0x82, 0x8a, 0x21, 0xee, 0xf9, 0xb3, 0x28, + 0x89, 0xe4, 0x05, 0x0b, 0xf3, 0x74, 0x9a, 0x0e, 0x95, 0x33, 0x1b, 0xaa, 0x2d, 0xe8, 
0x56, 0x67, + 0xda, 0xfc, 0xfc, 0xb3, 0x03, 0xed, 0xe7, 0x7c, 0x30, 0x88, 0x92, 0x41, 0x6e, 0xed, 0x01, 0xb8, + 0x2c, 0xa1, 0xbd, 0x98, 0xf9, 0x22, 0x0d, 0xfc, 0xd8, 0x28, 0xed, 0xd9, 0xd8, 0x31, 0x1a, 0x92, + 0x06, 0x76, 0x92, 0xbb, 0x0b, 0x37, 0xc3, 0x48, 0x56, 0xe0, 0xe6, 0xcc, 0x5b, 0xb3, 0xaa, 0x12, + 0x7e, 0x1d, 0xea, 0x41, 0xcc, 0xa8, 0xb0, 0xd1, 0x36, 0x03, 0xd7, 0x83, 0x1b, 0x7d, 0xa6, 0x82, + 0x0b, 0x3f, 0x2f, 0x0b, 0xec, 0xe9, 0x35, 0xb2, 0xac, 0x85, 0x67, 0xa6, 0xc4, 0x28, 0x74, 0x9e, + 0xd3, 0x1e, 0x8b, 0x59, 0x58, 0x34, 0xa5, 0xab, 0x0b, 0xe9, 0xbd, 0x1a, 0x9b, 0x77, 0x04, 0xab, + 0x45, 0x30, 0x26, 0x3d, 0x15, 0xf5, 0xf3, 0x7a, 0xea, 0xac, 0x37, 0x44, 0x23, 0xbd, 0x3f, 0x2e, + 0x40, 0xeb, 0x5c, 0xd0, 0x80, 0xe9, 0x43, 0x6a, 0x0b, 0x9a, 0x61, 0x26, 0xf4, 0x39, 0xad, 0x5d, + 0x74, 0x48, 0x31, 0xc6, 0x14, 0xcb, 0x24, 0x33, 0x7b, 0x4e, 0x05, 0xef, 0x47, 0x31, 0xcb, 0xe3, + 0xb2, 0x9a, 0x49, 0x86, 0x86, 0x4f, 0xad, 0x18, 0xe3, 0x8c, 0xd8, 0x21, 0x13, 0x09, 0x8b, 0x27, + 0x68, 0x53, 0xff, 0x68, 0xe6, 0x99, 0xd6, 0x14, 0xf8, 0x7d, 0xd8, 0x40, 0x3c, 0x7b, 0xad, 0xb0, + 0x73, 0x84, 0x93, 0x19, 0x86, 0x19, 0x34, 0xf6, 0xc4, 0xea, 0x8a, 0x39, 0x3b, 0xd0, 0xc1, 0x39, + 0x83, 0x34, 0x9b, 0xc0, 0x4d, 0x22, 0xb6, 0x33, 0xc9, 0x9e, 0xa6, 0xd9, 0xac, 0x37, 0x92, 0x8e, + 0xd2, 0x98, 0x4d, 0xc0, 0x8d, 0xc2, 0x9b, 0x33, 0xad, 0xc9, 0xf1, 0xde, 0x01, 0xb4, 0x31, 0x24, + 0xa5, 0x2c, 0xdb, 0x83, 0xa5, 0xbc, 0x50, 0x9c, 0x2a, 0x3b, 0x45, 0xfc, 0x48, 0x8e, 0xf2, 0xd6, + 0x60, 0xb5, 0x30, 0x61, 0x93, 0xf7, 0x1f, 0x35, 0x68, 0x63, 0x56, 0x1f, 0x66, 0xfd, 0x6b, 0x3b, + 0xeb, 0x03, 0x70, 0x7b, 0x59, 0xdf, 0x9f, 0xdb, 0x5d, 0x3b, 0x3d, 0x34, 0x50, 0x6e, 0xb0, 0x77, + 0xa0, 0x95, 0x64, 0x23, 0xbf, 0x37, 0xc6, 0xf7, 0x9c, 0x29, 0xfa, 0x66, 0x92, 0x8d, 0x0e, 0x71, + 0x8c, 0x6b, 0xa0, 0xa9, 0x54, 0x99, 0x60, 0x36, 0x48, 0xa3, 0x97, 0xf5, 0x4f, 0x95, 0x98, 0xd7, + 0x7f, 0xeb, 0x1f, 0xa2, 0xff, 0x36, 0x3e, 0x4c, 0xff, 0x5d, 0x7a, 0xd7, 0xfe, 0x2b, 0x45, 0x60, + 0x9f, 0xb7, 0xf6, 0xd2, 
0xd8, 0x92, 0x22, 0x30, 0x2b, 0xa3, 0x3a, 0x94, 0x2a, 0x57, 0xb7, 0x8c, + 0x3a, 0x94, 0x6a, 0xa2, 0x2e, 0xb5, 0x24, 0x98, 0x69, 0x49, 0x78, 0xad, 0x46, 0xe3, 0x51, 0x12, + 0x50, 0x91, 0x98, 0xe2, 0xc0, 0x2b, 0xce, 0x22, 0x69, 0x4b, 0x11, 0x9c, 0x4c, 0xa4, 0xde, 0xbf, + 0x1d, 0x58, 0x2d, 0x28, 0xb6, 0x25, 0x59, 0x8a, 0xbf, 0x33, 0x15, 0xff, 0x29, 0xd6, 0x16, 0xaa, + 0xac, 0xe5, 0x8d, 0xbe, 0x36, 0xd5, 0xe8, 0x3f, 0x40, 0xf3, 0x9e, 0x7b, 0x56, 0xd4, 0xe7, 0x9f, + 0x15, 0x33, 0x8d, 0xbe, 0x51, 0x69, 0xf4, 0x7f, 0x71, 0x60, 0xfd, 0x88, 0x63, 0xf9, 0x28, 0xf6, + 0x54, 0xf0, 0x2c, 0xcd, 0x73, 0xfb, 0x0e, 0x3e, 0x8b, 0x79, 0x96, 0xea, 0xcc, 0xc5, 0x9d, 0xd7, + 0xf1, 0x01, 0xcc, 0xb3, 0xd4, 0xde, 0x5f, 0x8d, 0x52, 0x46, 0x6f, 0xcc, 0x3b, 0xae, 0x4e, 0x0c, + 0xfc, 0x2c, 0x7a, 0xc3, 0x70, 0x55, 0xfb, 0xa3, 0x42, 0x8d, 0x53, 0xf3, 0x86, 0x69, 0x11, 0x30, + 0xa2, 0xf3, 0x71, 0x5a, 0x06, 0x24, 0x74, 0xc4, 0xec, 0xed, 0xd8, 0x02, 0xf0, 0xf2, 0x88, 0x94, + 0x95, 0x5e, 0xad, 0xda, 0x4a, 0x5d, 0xaf, 0xd2, 0x9e, 0x88, 0xd1, 0x92, 0xf7, 0x5f, 0x07, 0x36, + 0x66, 0xfc, 0xb7, 0xc4, 0xfd, 0x5f, 0x37, 0x60, 0xc9, 0x57, 0x54, 0x0e, 0x0d, 0x7d, 0x75, 0x4d, + 0xfe, 0x39, 0x8e, 0x67, 0x77, 0x57, 0xaf, 0xec, 0xee, 0x0e, 0xb4, 0x70, 0xa6, 0x51, 0x37, 0xb4, + 0xba, 0x89, 0x02, 0xad, 0xfc, 0x0e, 0x74, 0x02, 0x3e, 0x1a, 0x65, 0x49, 0x14, 0x50, 0xc5, 0x45, + 0xf1, 0x58, 0x5f, 0x21, 0xab, 0x65, 0x39, 0xbe, 0xd4, 0x7f, 0x57, 0x83, 0xcd, 0x7c, 0xf3, 0x27, + 0x89, 0x54, 0x34, 0x09, 0x58, 0xce, 0x9f, 0x0b, 0x8b, 0xda, 0xbc, 0x79, 0x1a, 0xe9, 0x6f, 0x94, + 0xe9, 0xfd, 0x98, 0xfd, 0xea, 0x6f, 0xf7, 0x11, 0xb4, 0x42, 0xaa, 0xe8, 0x64, 0xa3, 0xed, 0xe9, + 0xfb, 0xf1, 0x31, 0x55, 0x14, 0xb7, 0x4c, 0x9a, 0xa1, 0xfd, 0x72, 0xf7, 0xa1, 0xae, 0xff, 0x60, + 0xda, 0xbc, 0xbd, 0x5b, 0xbd, 0xf5, 0x9c, 0xa1, 0xda, 0x5c, 0x7d, 0x0c, 0x74, 0x9a, 0x8d, 0xfa, + 0x97, 0xb2, 0xd1, 0x98, 0x65, 0xe3, 0x23, 0x58, 0x89, 0xec, 0xee, 0x8a, 0x68, 0xd4, 0xc9, 0x72, + 0x2e, 0x43, 0x0b, 0x33, 0x84, 0x35, 0x2b, 0x84, 0x7d, 0x03, 
0x6e, 0xc8, 0xac, 0x17, 0x46, 0x97, + 0x3e, 0xef, 0xf7, 0x25, 0x53, 0xdd, 0xd6, 0x76, 0x6d, 0xa7, 0x4e, 0x56, 0x8c, 0xf0, 0xa5, 0x96, + 0xb9, 0xb7, 0xa0, 0x61, 0x5b, 0x0c, 0x68, 0x03, 0x76, 0x84, 0xce, 0x47, 0xd2, 0x97, 0x3c, 0x13, + 0x01, 0xd3, 0xad, 0xa3, 0x49, 0x9a, 0x91, 0x3c, 0xd3, 0x63, 0x2f, 0x84, 0x6e, 0x95, 0x83, 0xc9, + 0x1f, 0x93, 0x29, 0xcf, 0x9d, 0xb9, 0x9e, 0x1b, 0xc3, 0xbe, 0xa0, 0xc9, 0xd0, 0x52, 0x03, 0x46, + 0x44, 0x68, 0x32, 0xfc, 0x7c, 0xb1, 0x59, 0xeb, 0x2c, 0x7a, 0x9f, 0xc0, 0x2d, 0xfd, 0x4f, 0x94, + 0xa5, 0x67, 0x48, 0x70, 0x89, 0xe8, 0x3b, 0xf9, 0xff, 0x2b, 0xb3, 0x00, 0xde, 0x64, 0xcc, 0x9f, + 0x2a, 0xcc, 0x90, 0x17, 0xb0, 0x52, 0x9e, 0x33, 0x0b, 0x76, 0xca, 0x60, 0x77, 0x1b, 0x56, 0x12, + 0xf6, 0x5a, 0xf9, 0xd3, 0xef, 0x66, 0x40, 0x99, 0xbd, 0x15, 0x7d, 0x01, 0x9b, 0x15, 0x2f, 0xec, + 0x56, 0x1f, 0xc3, 0x0d, 0x73, 0x07, 0xb2, 0x0a, 0x7b, 0x87, 0xe9, 0x56, 0xae, 0x41, 0xf9, 0xc4, + 0x15, 0x59, 0x1a, 0x1d, 0x8e, 0x60, 0x8b, 0x8b, 0x41, 0x19, 0x1c, 0x46, 0x52, 0x89, 0x2c, 0x51, + 0xd1, 0x88, 0x1d, 0xae, 0x98, 0x9f, 0x00, 0x3a, 0xa3, 0xe4, 0xa9, 0xf3, 0xeb, 0x9f, 0x0c, 0x22, + 0x75, 0x91, 0xf5, 0x76, 0x03, 0x3e, 0x2a, 0xfd, 0x59, 0xbe, 0xe2, 0x73, 0xc0, 0xa7, 0xff, 0x45, + 0xff, 0xc7, 0x71, 0x7a, 0x0d, 0x3d, 0xf8, 0xfe, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x92, 0x41, + 0x12, 0x3f, 0xd4, 0x18, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/worker.proto b/executor/proto/tensorflow/core/protobuf/worker.proto new file mode 100644 index 0000000000..88fb76de65 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/worker.proto @@ -0,0 +1,588 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; +option java_outer_classname = "WorkerProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.distruntime"; + +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +import "google/protobuf/any.proto"; +import "tensorflow/core/framework/cost_graph.proto"; +import "tensorflow/core/framework/device_attributes.proto"; +import "tensorflow/core/framework/graph.proto"; +import "tensorflow/core/framework/step_stats.proto"; +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; +import "tensorflow/core/lib/core/error_codes.proto"; +import "tensorflow/core/protobuf/config.proto"; +import "tensorflow/core/protobuf/debug.proto"; +import "tensorflow/core/protobuf/named_tensor.proto"; +import "tensorflow/core/protobuf/tensorflow_server.proto"; + +//////////////////////////////////////////////////////////////////////////////// +// +// GetStatus method request/response messages +// +//////////////////////////////////////////////////////////////////////////////// + +message GetStatusRequest {} + +message GetStatusResponse { + repeated DeviceAttributes device_attributes = 1; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// CreateSession method request/response messages +// +// For each session, +// 
+//////////////////////////////////////////////////////////////////////////////// + +message CreateWorkerSessionRequest { + // Sessions are identified by a given handle. + string session_handle = 1; + + // Defines the configuration of a TensorFlow worker. + ServerDef server_def = 2; + + // If true, any resources such as Variables used in the session will not be + // shared with other sessions. + bool isolate_session_state = 3; + + // The device attributes of all the devices in the cluster. + repeated DeviceAttributes cluster_device_attributes = 4; +} + +message CreateWorkerSessionResponse {} + +//////////////////////////////////////////////////////////////////////////////// +// +// DeleteSession method request/response messages +// +// Deletes all worker-side state associated with the given session handle. +// +//////////////////////////////////////////////////////////////////////////////// + +message DeleteWorkerSessionRequest { + // Sessions are identified by a given handle. + string session_handle = 1; +} + +message DeleteWorkerSessionResponse {} + +//////////////////////////////////////////////////////////////////////////////// +// +// RegisterGraph method request/response messages +// +// For each session, after the master placed every node on a device, +// it partitions the whole graph into many subgraphs. All the nodes in +// a subgraph were in the same worker, but potentially on many devices +// owned by that worker (e.g. cpu0, plus gpu0, gpu1, ..., gpu7). The +// master registers subgraphs for a worker before running any steps. A +// successful registration returns a graph handle to be used in latter +// RunGraph requests. +// +//////////////////////////////////////////////////////////////////////////////// + +message RegisterGraphRequest { + // Subgraphs are scoped within one session. + string session_handle = 1; + + // Set to true if `CreateWorkerSession` was called for `session_handle`. 
+ bool create_worker_session_called = 6; + + // "graph_def" has the subgraph of nodes for this worker, with each node + // having its device_name filled in. + GraphDef graph_def = 2; + + // True iff the graph (before partitioning) contains control flow nodes. + // + // As of 01/11/2015, this is no longer set by clients. + bool has_control_flow = 3 [deprecated = true]; + + // Configuration options for the session in which this graph was created. + GraphOptions graph_options = 4; + + // Field(s) used by TensorFlow Debugger (tfdbg). + DebugOptions debug_options = 5; + + // If graph_def contains any collective ops this must be a positive + // integer used to coordinate execution with other graphs. All + // graphs in a distributed execution with the same + // collective_graph_key will coordinate to use the same step_id + // concurrently so that BufRendezvous entries will make the correct + // values accessible. + int64 collective_graph_key = 7; +} + +message RegisterGraphResponse { + // If the registration succeeds, returns an opaque graph_handle to + // the master. The master calls RunGraph with graph_handle to + // compute different steps. + string graph_handle = 1; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// DeregisterGraph method request/response messages +// +// The master deregisters the given graph_handle when the graph is no +// longer needed (e.g., the overall graph is re-scheduled and nodes +// are re-placed). +// +// The worker deregisters a graph_handle automatically according to on +// a TTL-base policy in case of master restarts. +// +//////////////////////////////////////////////////////////////////////////////// + +message DeregisterGraphRequest { + // The session_handle used when registering the graph. If session_handle is + // empty, a single global namespace is used. + string session_handle = 2; + + // Set to true if `CreateWorkerSession` was called for `session_handle`. 
+ bool create_worker_session_called = 3; + + // REQUIRED: graph_handle must be returned by a RegisterGraph call + // to the same WorkerService. + string graph_handle = 1; +} + +message DeregisterGraphResponse { + // TODO(mrry): Optionally add summary stats for the graph. +} + +//////////////////////////////////////////////////////////////////////////////// +// +// CleanupAll method request/response messages +// +//////////////////////////////////////////////////////////////////////////////// + +message CleanupAllRequest { + // A list of container names. + // + // If 'container' is not empty, releases resources in the given + // containers in all devices. + // + // If 'container' is empty, releases resources in the default + // container in all devices. + repeated string container = 1; +} + +message CleanupAllResponse {} + +//////////////////////////////////////////////////////////////////////////////// +// +// RunGraph request / response messages +// +// The worker executes all subgraphs registered under graph_handle. +// RunGraph returns after the execution finishes or an error is +// encountered. +// A sequence of RunGraphRequests with is_partial may be sent to RunGraph for +// partial graph execution. +// +//////////////////////////////////////////////////////////////////////////////// + +// Options specific to the execution of a single step. +message ExecutorOpts { + bool record_costs = 1; + bool record_timeline = 3; + bool record_partition_graphs = 4; + bool report_tensor_allocations_upon_oom = 5; +} + +message RunGraphRequest { + // session_handle is the master-generated unique id for this session. + // If session_handle is non-empty, it must be the same as used when + // registering the graph. If it is empty, a single global namespace is used to + // search for the graph_handle. + string session_handle = 8; + + // Set to true if `CreateWorkerSession` was called for `session_handle`. 
+ bool create_worker_session_called = 10; + + // REQUIRED: graph_handle must be returned by a RegisterGraph call + // to the same WorkerService. + string graph_handle = 1; + + // A unique ID to distinguish different runs of the same graph. + // + // The master generates a global unique `step_id` to distinguish + // different runs of the graph computation. Subgraphs communicate + // (e.g., send/recv ops) with each other using `step_id` to + // distinguish tensors generated by different runs. + int64 step_id = 2; + + // Options for this step. + ExecutorOpts exec_opts = 5; + + // Runs the graph. + // + // Sends the tensors in "send" into the graph before the run and + // fetches the keys into `RunGraphResponse.recv` after the run. + repeated NamedTensorProto send = 3; + repeated string recv_key = 4; + + // True if the RunGraphRequest is a partial run request. + bool is_partial = 6; + // True if this is the last partial run request in a sequence of requests. + bool is_last_partial_run = 7; + + // If true then some errors, e.g., execution errors that have long + // error messages, may return an OK RunGraphResponse with the actual + // error saved in the status_code/status_error_message fields of the + // response body. This is a workaround since the RPC subsystem may + // truncate long metadata messages. + bool store_errors_in_response_body = 9; + + // Unique identifier for this request. Every RunGraphRequest must have a + // unique request_id, and retried RunGraphRequests must have the same + // request_id. If request_id is zero, retry detection is disabled. + // + // Retried RunGraphRequests are problematic because they may issue a + // RecvTensor that will have no corresponding sender and will wait forever. + // Workers use request_ids to reject retried RunGraph requests instead of + // waiting forever. + int64 request_id = 11; + + // Next: 12 +} + +message RunGraphResponse { + // A list of tensors corresponding to those requested by + // `RunGraphRequest.recv_key`. 
+ repeated NamedTensorProto recv = 1; + + // If the request asked for execution stats, the cost graph, or the partition + // graphs, these are returned here. + // TODO(suharshs): Package these in a RunMetadata instead. + StepStats step_stats = 2; + CostGraphDef cost_graph = 3; + repeated GraphDef partition_graph = 4; + + // If store_errors_in_response_body is true in the request, then + // optionally the server may return an OK status for the RPC and + // fill the true status into the fields below, to allow for messages + // that are too long to fit in metadata. + error.Code status_code = 5; + string status_error_message = 6; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// CleanupGraph method request/response messages +// +// After the master receives RunGraph responses from all workers, the +// master instructs every worker to cleanup any remaining state of a +// step (e.g. tensors buffered by a `Send` op but not picked up by +// other workers). The master does not necessarily need to wait for +// completion of CleanupGraph calls. +// +// Workers should cleanup step states automatically according to a +// TTL-based policy in case of master restarts. +// +//////////////////////////////////////////////////////////////////////////////// + +message CleanupGraphRequest { + int64 step_id = 1; +} + +message CleanupGraphResponse {} + +//////////////////////////////////////////////////////////////////////////////// +// +// RecvTensor method request/response messages +// +//////////////////////////////////////////////////////////////////////////////// + +message RecvTensorRequest { + // The step in which the tensor will be produced. + // + // REQUIRED: This must eventually correspond to the `step_id` passed + // into a RunGraph call on the same WorkerService. + int64 step_id = 1; + + // A key identifying the channel to receive tensors from. 
A RecvTensor request + // retrieves one tensor from the channel, but multiple tensors can be sent and + // received over the same channel with multiple RecvTensor requests. See + // rendezvous.h for details. + string rendezvous_key = 2; + + // If true, use an out-of-band DMA mechanism to transfer the + // received tensor. + bool dma_ok = 3; + + // Optional information on client-side device locality. + DeviceLocality client_locality = 4; + + // Optional information on server-side device locality. + DeviceLocality server_locality = 5; + + // Optional information needed by the RPC subsystem. + google.protobuf.Any transport_options = 6; + + // Unique identifier for this request. Every RecvTensorRequest must have a + // unique request_id, and retried RecvTensorRequests must have the same + // request_id. If request_id is zero, retry detection and response cache + // are disabled. + // + // Retried RecvTensorRequests are problematic because a RecvTensor with no + // corresponding sender will wait forever, and the tensor may have been + // delivered to a previous retry. Workers use request_ids to reject retried + // RecvTensor requests instead of waiting forever. + int64 request_id = 7; +} + +message RecvTensorResponse { + // The tensor as a proto. + TensorProto tensor = 1; + + // If true, this tensor was the output of a dead node, and the + // content is invalid. + bool is_dead = 2; + + // The time at which tensor was available and started to be returned. + int64 send_start_micros = 3; + + // Optional additional information about how to receive the tensor, + // e.g. in the event that `RecvTensorRequest.dma_ok` was true. + google.protobuf.Any transport_options = 4; + + // Whether the receiver should send a MarkRecvFinishedRequest to the sender + // to ack the message. + bool require_ack = 5; +} + +// Message for managing the response cache maintained on the sender side. +// Currently only used by the gRPC worker service. 
+message MarkRecvFinishedRequest { + int64 request_id = 1; +} + +message MarkRecvFinishedResponse {} + +//////////////////////////////////////////////////////////////////////////////// +// +// Logging method request/response messages +// +// NOTE(mrry): This feature is not supported in the open-source +// version, and these messages are expected to change. +// +//////////////////////////////////////////////////////////////////////////////// + +// Out-of-band request to begin or end logging, or +// to retrieve logs for particular steps. +message LoggingRequest { + // If true, RPC logging will be enabled. + bool enable_rpc_logging = 1; + + // If true, RPC logging will be disabled. + bool disable_rpc_logging = 4; + + // If true, discard any saved logging data (for all steps). + bool clear = 2; + + // When set, requests all saved log data pertaining to the step. + // Any log data retrieved is eliminated from the store and cannot be + // retrieved again. + repeated int64 fetch_step_id = 3; +} + +message LabeledStepStats { + int64 step_id = 1; + StepStats step_stats = 2; +} + +message LoggingResponse { + repeated LabeledStepStats step = 1; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// Tracing method request/response messages +// +// NOTE(mrry): This feature is not supported in the open-source +// version, and these messages are expected to change. +// +//////////////////////////////////////////////////////////////////////////////// + +message TraceOpts { + // Length of the trace to be taken, in seconds. + double duration = 1; + // If true, capture step profile locally in each worker. Currently + // unimplemented. + bool use_step_profiler = 2; + // If true, capture kernel events from each worker. + bool use_kernel_profiler = 3; + // If true, capture extended profiling events from TensorFlow process. + bool use_extended_profiler = 4; + // If true, capture GPU profiling events locally on each + // machine. 
Currently unimplemented. + bool use_gpu_profiler = 5; + // If true, collect sampled profile events. Currently unimplemented. + bool use_sample_profiler = 6; +} + +// Out-of-band request to configure distributed tracing. +message TracingRequest { + TraceOpts options = 1; +} + +message TracingResponse {} + +//////////////////////////////////////////////////////////////////////////////// +// +// Raw data transfers in support of Collective Ops. +// These methods are experimental and subject to change. +// +// The intention is to allow collectives to take advantage of the most +// efficient methods available on a platform, e.g. RDMA, and not be +// constrained to use the RPC system in use by other methods. +// +//////////////////////////////////////////////////////////////////////////////// + +message RecvBufRequest { + // Use of the fields below may vary by implementation. For example + // the buf_ptr and num_bytes may be set only for local operations and + // not sent on the wire, or only sent on the wire in one direction. + + // Used at server side to find the correct BufRendezvous. + int64 step_id = 1; + + // Arbitrary string identifying a BufRendezvous entry. + string buf_rendezvous_key = 2; + + // Size of value expected, must agree with BufRendezvous entry. + int64 num_bytes = 3; + + // When RDMA is in use, address of destination field on client. + fixed64 buf_ptr = 4; + + // Optional information on client-side device locality. + DeviceLocality client_locality = 5; + + // Optional information on server-side device locality. + DeviceLocality server_locality = 6; + + // Optional, implementation-specific data. + google.protobuf.Any transport_options = 7; + // For annotating timeline and device incarnation check. + string src_device = 8; + // Optional, for annotating the timeline. 
+ string dst_device = 9; + + // Depending on the RPC system in use, it may be necessary to set this + // id to detect resends of RPCs where the server is not aware that + // the prior RPC failed. + int64 request_id = 10; + + // Incarnation number of the source device, used to detect worker failures. + uint64 src_incarnation = 11; +} + +message RecvBufResponse { + // Use of the fields below may vary by implementation. Comments give + // intended use. + + fixed64 buf_ptr = 1; // Address of source field on server. + int64 num_bytes = 2; // Byte length of buf_ptr field, if set. + bool is_dead = 3; // True if value is 'dead' like a tensor. + // Optional, implementation-specific data. + google.protobuf.Any transport_options = 4; + // Optional, for timeline. + int64 send_start_micros = 5; + + // Whether the receiver should send a MarkRecvFinishedRequest to the sender + // to ack the message. + bool require_ack = 6; +} + +//////////////////////////////////////////////////////////////////////////////// +// +// Collective Op dynamic group resolution messages. +// +//////////////////////////////////////////////////////////////////////////////// + +// Supplies one or more device names as members of the group identified by +// group_key. Service will respond when all group_size devices become known. +// All devices in group must have same type. +message CompleteGroupRequest { + int32 group_key = 1; + int32 group_size = 2; + string device_type = 3; + repeated string device_name = 4; + int32 collective_type = 5; +} + +// Gives the complete membership of the group identified by group_key. 
+message CompleteGroupResponse { + int32 group_key = 1; + int32 group_size = 2; + string device_type = 3; + int32 num_tasks = 4; // number of distinct tasks hosting the devices + repeated string device_name = 5; + repeated string task_name = 6; // task name prefixes of device_names + bytes communicator_key = 7; +} + +// Supplies data about one collective op belonging to the instance identified +// by instance_key. Service will respond when all group_size ops have +// become known. Most of the data being sent is for correctness checking, +// to ensure that all ops in the instance share common attributes. +message CompleteInstanceRequest { + string name = 1; + int32 type = 2; + DataType data_type = 3; + TensorShapeProto shape = 4; + int32 group_key = 5; + int32 group_size = 6; + int32 instance_key = 7; + string device_type = 8; + repeated int32 subdiv_offset = 9; + string device = 10; + bool is_source = 11; +} + +// Confirms that every op in the instance has consistently declared itself. +// Also gives the source_rank in case of broadcast. +message CompleteInstanceResponse { + int32 instance_key = 1; + int32 source_rank = 2; + reserved 3; +} + +// Request for next agreed-upon step_id for the specified graph_keys. +// This is used to enable multiple graphs containing nodes from +// a common collective instance to coordinate using the same step_ids. +message GetStepSequenceRequest { + repeated int64 graph_key = 1; +} + +message StepSequence { + int64 graph_key = 1; + int64 next_step_id = 2; +} + +// Next valid step_ids for one or more graph_keys. +message GetStepSequenceResponse { + repeated StepSequence step_sequence = 1; +} diff --git a/executor/proto/tensorflow/core/protobuf/worker_service.pb.go b/executor/proto/tensorflow/core/protobuf/worker_service.pb.go new file mode 100644 index 0000000000..6f39ebf144 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/worker_service.pb.go @@ -0,0 +1,59 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow/core/protobuf/worker_service.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("tensorflow/core/protobuf/worker_service.proto", fileDescriptor_cf631a047edac54b) +} + +var fileDescriptor_cf631a047edac54b = []byte{ + // 469 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0x4f, 0x6f, 0x13, 0x31, + 0x10, 0xc5, 0xe1, 0x42, 0xe8, 0x88, 0x2a, 0xc8, 0xbd, 0xa5, 0x2d, 0x94, 0x96, 0x3f, 0x27, 0x12, + 0x09, 0xae, 0x5c, 0xba, 0x09, 0x0a, 0x08, 0x90, 0x4a, 0x12, 0x09, 0x29, 0x12, 0x42, 0x9b, 0x65, + 0xe2, 0xae, 0xd8, 0xd8, 0xee, 0xd8, 0x6e, 0xbf, 0x36, 0x1f, 0xa1, 0xda, 0x5d, 0x3b, 0xb1, 0x5b, + 0x27, 0xbd, 0xad, 0xde, 0x7b, 0xf3, 0x9b, 0x89, 0xe2, 0x19, 0x78, 0x6f, 0x50, 0x68, 0x49, 0xcb, + 0x4a, 0xde, 0x0c, 0x0a, 0x49, 0x38, 0x50, 0x24, 0x8d, 0x5c, 0xd8, 0xe5, 0xe0, 0x46, 0xd2, 0x3f, + 0xa4, 0x3f, 0x1a, 0xe9, 0xba, 0x2c, 0xb0, 0xdf, 0xe8, 0xac, 0xbb, 0x89, 0xf7, 0x39, 0xa9, 0xa2, + 0xf7, 0xe6, 0x81, 0xfa, 0xb6, 0xee, 0xc3, 0xff, 0x3d, 0xd8, 0xff, 0xd5, 0x08, 0xd3, 0x96, 0xc7, + 0xbe, 0xc0, 0xde, 0x18, 0xcd, 0xd4, 0xe4, 0xc6, 0x6a, 0x76, 0xd4, 0x0f, 0xb8, 0x6b, 0x79, 0x82, + 0x57, 0x16, 0xb5, 0xe9, 0x1d, 0x6f, 0x71, 0xb5, 0x92, 0x42, 0x23, 0x5b, 0xc2, 0xc1, 0x90, 0x30, + 0x37, 0xe8, 0x1b, 0x68, 0x5d, 0x4a, 0xc1, 0xde, 0x86, 0x55, 0x89, 0x80, 0xa7, 0xbf, 0x7b, 0x30, + 0xb7, 0xe9, 0x33, 0xc2, 0x0a, 0x77, 0xf6, 0x49, 0x04, 0x92, 0x7d, 0x92, 
0x39, 0xd7, 0x67, 0x06, + 0xfb, 0x13, 0xe4, 0xa5, 0x36, 0x48, 0x63, 0xca, 0xd5, 0x25, 0x3b, 0x09, 0x2b, 0x23, 0xcb, 0xb3, + 0x5f, 0xed, 0x48, 0x38, 0xea, 0x1c, 0xba, 0x23, 0xa4, 0x88, 0x7b, 0x1a, 0x4f, 0x44, 0x29, 0xf2, + 0xd9, 0xce, 0x8c, 0x63, 0x7f, 0x86, 0xa7, 0x13, 0x2b, 0x5a, 0xe8, 0x61, 0x34, 0x8a, 0x53, 0x3d, + 0xed, 0x28, 0x6d, 0x3a, 0xcc, 0x4f, 0x78, 0x36, 0xac, 0x30, 0x17, 0x56, 0xb5, 0xa8, 0x97, 0xd1, + 0x3f, 0x13, 0x38, 0x1e, 0x77, 0xb2, 0x3d, 0xe0, 0x90, 0xdf, 0x00, 0x9c, 0x7e, 0x5e, 0x55, 0xec, + 0x38, 0x91, 0x3f, 0xaf, 0x2a, 0x8f, 0x7b, 0xb1, 0xcd, 0x76, 0xb0, 0x1f, 0x00, 0x13, 0x2c, 0xae, + 0x67, 0x4d, 0x28, 0x86, 0x6d, 0xf4, 0x24, 0x2c, 0xb4, 0x5b, 0xd8, 0xe9, 0x23, 0x96, 0x41, 0xe7, + 0xbb, 0xe4, 0xbc, 0x14, 0x9c, 0xf5, 0xc2, 0xb0, 0x13, 0x3d, 0xe8, 0x30, 0xe9, 0xb9, 0x91, 0x32, + 0xe8, 0xcc, 0x28, 0x2f, 0xee, 0x31, 0x9c, 0x98, 0x64, 0xac, 0x3d, 0xc7, 0x18, 0x41, 0xa7, 0x9e, + 0x2f, 0xb3, 0xcb, 0x98, 0xe1, 0xc4, 0x24, 0x63, 0xed, 0xad, 0x7f, 0xcd, 0x1c, 0xba, 0xcd, 0x6a, + 0xa2, 0x9a, 0xd6, 0x05, 0xa2, 0xc0, 0xf8, 0x7d, 0xdd, 0x31, 0x93, 0xef, 0xeb, 0x5e, 0x66, 0xb3, + 0x11, 0x43, 0xb9, 0x52, 0xf5, 0xca, 0x8c, 0x49, 0x5a, 0x15, 0x6f, 0x44, 0x64, 0x25, 0x37, 0xe2, + 0x4e, 0xc2, 0x51, 0x7f, 0xc3, 0x73, 0x6f, 0x7c, 0x15, 0xda, 0xe4, 0xf5, 0xc8, 0x67, 0xa9, 0x32, + 0xef, 0x7a, 0xf6, 0xeb, 0xdd, 0xa1, 0x16, 0x9f, 0x5d, 0x41, 0x4f, 0x12, 0x0f, 0xa3, 0x7f, 0x4b, + 0x6d, 0xc8, 0x0a, 0x53, 0xae, 0x30, 0x3b, 0x88, 0xae, 0xe1, 0x45, 0x7d, 0x24, 0xf5, 0xc5, 0xe3, + 0xf9, 0x27, 0x5e, 0x9a, 0x4b, 0xbb, 0xe8, 0x17, 0x72, 0x35, 0x08, 0x2e, 0x6b, 0xfa, 0x93, 0xcb, + 0xf8, 0xe4, 0x2e, 0x9e, 0x34, 0x5f, 0x1f, 0x6f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x25, 0x57, + 0x29, 0xd5, 0x05, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/core/protobuf/worker_service.proto b/executor/proto/tensorflow/core/protobuf/worker_service.proto new file mode 100644 index 0000000000..9ebbd553f2 --- /dev/null +++ b/executor/proto/tensorflow/core/protobuf/worker_service.proto @@ 
-0,0 +1,89 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto3"; + +package tensorflow.grpc; +option java_outer_classname = "WorkerServiceProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.distruntime"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf"; +import "tensorflow/core/protobuf/worker.proto"; + +//////////////////////////////////////////////////////////////////////////////// +// +// WorkerService defines a TensorFlow service that executes dataflow +// graphs on a set of local devices, on behalf of a MasterService. +// +// A worker service keeps track of multiple "registered graphs". Each +// registered graph is a subgraph of a client's graph, corresponding to +// only the nodes that should execute on this worker (and any +// additional nodes necessary for inter-process communication using +// the `RecvTensor` method). +// +//////////////////////////////////////////////////////////////////////////////// + +service WorkerService { + // See worker.proto for details. + rpc GetStatus(GetStatusRequest) returns (GetStatusResponse); + + // See worker.proto for details. + rpc CreateWorkerSession(CreateWorkerSessionRequest) + returns (CreateWorkerSessionResponse); + + // See worker.proto for details. 
+ rpc DeleteWorkerSession(DeleteWorkerSessionRequest) + returns (DeleteWorkerSessionResponse); + + // See worker.proto for details. + rpc RegisterGraph(RegisterGraphRequest) returns (RegisterGraphResponse); + + // See worker.proto for details. + rpc DeregisterGraph(DeregisterGraphRequest) returns (DeregisterGraphResponse); + + // See worker.proto for details. + rpc RunGraph(RunGraphRequest) returns (RunGraphResponse); + + // See worker.proto for details. + rpc CleanupGraph(CleanupGraphRequest) returns (CleanupGraphResponse); + + // See worker.proto for details. + rpc CleanupAll(CleanupAllRequest) returns (CleanupAllResponse); + + // See worker.proto for details. + rpc RecvTensor(RecvTensorRequest) returns (RecvTensorResponse) { + // RecvTensor Method + } + + // See worker.proto for details. + rpc Logging(LoggingRequest) returns (LoggingResponse); + + // See worker.proto for details. + rpc Tracing(TracingRequest) returns (TracingResponse); + + // See worker.proto for details. + rpc RecvBuf(RecvBufRequest) returns (RecvBufResponse) { + } + + // See worker.proto for details. + rpc GetStepSequence(GetStepSequenceRequest) returns (GetStepSequenceResponse); + + // See worker.proto for details. + rpc CompleteGroup(CompleteGroupRequest) returns (CompleteGroupResponse); + + // See worker.proto for details. 
+ rpc CompleteInstance(CompleteInstanceRequest) + returns (CompleteInstanceResponse); +} diff --git a/executor/proto/tensorflow/core/util/event.proto b/executor/proto/tensorflow/core/util/event.proto new file mode 100644 index 0000000000..ee1040d757 --- /dev/null +++ b/executor/proto/tensorflow/core/util/event.proto @@ -0,0 +1,118 @@ +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/core/framework/summary.proto"; + +option cc_enable_arenas = true; +option java_outer_classname = "EventProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.util"; + +// Protocol buffer representing an event that happened during +// the execution of a Brain model. +message Event { + // Timestamp of the event. + double wall_time = 1; + + // Global step of the event. + int64 step = 2; + + oneof what { + // An event file was started, with the specified version. + // This is use to identify the contents of the record IO files + // easily. Current version is "brain.Event:2". All versions + // start with "brain.Event:". + string file_version = 3; + // An encoded version of a GraphDef. + bytes graph_def = 4; + // A summary was generated. + Summary summary = 5; + // The user output a log message. Not all messages are logged, only ones + // generated via the Python tensorboard_logging module. + LogMessage log_message = 6; + // The state of the session which can be used for restarting after crashes. + SessionLog session_log = 7; + // The metadata returned by running a session.run() call. + TaggedRunMetadata tagged_run_metadata = 8; + // An encoded version of a MetaGraphDef. + bytes meta_graph_def = 9; + } +} + +// Protocol buffer used for logging messages to the events file. +message LogMessage { + enum Level { + UNKNOWN = 0; + // Note: The logging level 10 cannot be named DEBUG. Some software + // projects compile their C/C++ code with -DDEBUG in debug builds. 
So the + // C++ code generated from this file should not have an identifier named + // DEBUG. + DEBUGGING = 10; + INFO = 20; + WARN = 30; + ERROR = 40; + FATAL = 50; + } + Level level = 1; + string message = 2; +} + +// Protocol buffer used for logging session state. +message SessionLog { + enum SessionStatus { + STATUS_UNSPECIFIED = 0; + START = 1; + STOP = 2; + CHECKPOINT = 3; + } + + SessionStatus status = 1; + // This checkpoint_path contains both the path and filename. + string checkpoint_path = 2; + string msg = 3; +} + +// For logging the metadata output for a single session.run() call. +message TaggedRunMetadata { + // Tag name associated with this metadata. + string tag = 1; + // Byte-encoded version of the `RunMetadata` proto in order to allow lazy + // deserialization. + bytes run_metadata = 2; +} + +// Worker heartbeat messages. Support for these operations is currently +// internal and expected to change. + +// Current health status of a worker. +enum WorkerHealth { + OK = 0; // By default a worker is healthy. + RECEIVED_SHUTDOWN_SIGNAL = 1; + INTERNAL_ERROR = 2; + SHUTTING_DOWN = 3; // Worker has been instructed to shutdown after a timeout. +} + +// Indicates the behavior of the worker when an internal error or shutdown +// signal is received. 
+enum WorkerShutdownMode { + DEFAULT = 0; + NOT_CONFIGURED = 1; + WAIT_FOR_COORDINATOR = 2; + SHUTDOWN_AFTER_TIMEOUT = 3; +} + +message WatchdogConfig { + int64 timeout_ms = 1; +} + +message WorkerHeartbeatRequest { + WorkerShutdownMode shutdown_mode = 1; + WatchdogConfig watchdog_config = 2; +} + +message WorkerHeartbeatResponse { + WorkerHealth health_status = 1; + repeated Event worker_log = 2; + string hostname = 3; +} diff --git a/executor/proto/tensorflow/core/util/example_proto_fast_parsing_test.proto b/executor/proto/tensorflow/core/util/example_proto_fast_parsing_test.proto new file mode 100644 index 0000000000..ebd4af47e3 --- /dev/null +++ b/executor/proto/tensorflow/core/util/example_proto_fast_parsing_test.proto @@ -0,0 +1,21 @@ +// Protocol message for the fast Example parse unit test. +syntax = "proto3"; + +import "tensorflow/core/example/feature.proto"; +option cc_enable_arenas = true; + +package tensorflow; + +// This message is parallel to Example, but with additional fields to test +// unknown fields handling in example_proto_fast_parsing_test.cc. +message ExampleWithExtras { + Features features = 1; + + string extra1 = 1337; + int64 extra2 = 1338; + fixed32 extra3 = 1339; + fixed64 extra4 = 1340; + double extra5 = 1341; + repeated float extra6 = 1342; + Features extra7 = 1343; +}; diff --git a/executor/proto/tensorflow/core/util/memmapped_file_system.proto b/executor/proto/tensorflow/core/util/memmapped_file_system.proto new file mode 100644 index 0000000000..a988b45b6f --- /dev/null +++ b/executor/proto/tensorflow/core/util/memmapped_file_system.proto @@ -0,0 +1,31 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; + +// A message that describes one region of memmapped file. +message MemmappedFileSystemDirectoryElement { + uint64 offset = 1; + string name = 2; + uint64 length = 3; +} + +// A directory of regions in a memmapped file. +message MemmappedFileSystemDirectory { + repeated MemmappedFileSystemDirectoryElement element = 1; +} diff --git a/executor/proto/tensorflow/core/util/saved_tensor_slice.proto b/executor/proto/tensorflow/core/util/saved_tensor_slice.proto new file mode 100644 index 0000000000..6278685957 --- /dev/null +++ b/executor/proto/tensorflow/core/util/saved_tensor_slice.proto @@ -0,0 +1,84 @@ +// Protocol buffers for saved tensor slices. It's used for the brain tensor +// ops checkpoints and the V3 checkpoints in dist_belief. + +// A checkpoint file is an sstable. The value for each record is a serialized +// SavedTensorSlices message (defined below). +// +// Each checkpoint file has a record with the empty key (""), which corresponds +// to a SavedTensorSlices message that contains a "meta", that serves as a +// table of contents on all the tensor slices saved in this file. Since the key +// is "", it's always the first record in each file. +// +// Each of the rest of the records in a checkpoint stores the raw data of a +// particular tensor slice, in SavedSlice format. The corresponding key is an +// ordered code that encodes the name of the tensor and the slice +// information. 
The name is also stored in the SaveSlice message for ease of +// debugging and manual examination. + +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; +option java_outer_classname = "SavedTensorSliceProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.util"; + +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/tensor_slice.proto"; +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/framework/types.proto"; +import "tensorflow/core/framework/versions.proto"; + +// Metadata describing the set of slices of the same tensor saved in a +// checkpoint file. +message SavedSliceMeta { + // Name of the tensor. + string name = 1; + + // Shape of the tensor + TensorShapeProto shape = 2; + + // Type of the tensor + DataType type = 3; + + // Explicit list of slices saved in the checkpoint file. + repeated TensorSliceProto slice = 4; +}; + +// Metadata describing the set of tensor slices saved in a checkpoint file. +// It is always stored at the beginning of each checkpoint file. +message SavedTensorSliceMeta { + // Each SavedSliceMeta describes the slices for one tensor. + repeated SavedSliceMeta tensor = 1; + + // Compatibility version of this checkpoint. See core/public/version.h + // for version history. + VersionDef versions = 2; +}; + +// Saved tensor slice: it stores the name of the tensors, the slice, and the +// raw data. +message SavedSlice { + // Name of the tensor that this slice belongs to. This must be identical to + // the name used to encode the key for this record. + string name = 1; + + // Extent of the slice. Must have one entry for each of the dimension of the + // tensor that this slice belongs to. + TensorSliceProto slice = 2; + + // The raw data of the slice is stored as a TensorProto. Only raw data are + // stored (we don't fill in fields such as dtype or tensor_shape). 
+ TensorProto data = 3; +}; + +// Each record in a v3 checkpoint file is a serialized SavedTensorSlices +// message. +message SavedTensorSlices { + // This is only present at the first item of each checkpoint file and serves + // as a table of contents, listing all the tensor slices saved in this file. + SavedTensorSliceMeta meta = 1; + + // This exists in all but the first item of each checkpoint file. + SavedSlice data = 2; +}; diff --git a/executor/proto/tensorflow/core/util/test_log.proto b/executor/proto/tensorflow/core/util/test_log.proto new file mode 100644 index 0000000000..ddb0599388 --- /dev/null +++ b/executor/proto/tensorflow/core/util/test_log.proto @@ -0,0 +1,219 @@ +// Protocol messages for describing the results of benchmarks and unit tests. +syntax = "proto3"; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +option cc_enable_arenas = true; +option java_outer_classname = "TestLogProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.util.testlog"; + +package tensorflow; + +message EntryValue { + oneof kind { + double double_value = 1; + string string_value = 2; + } +}; + +message MetricEntry { + // Metric name + string name = 1; + + // Metric value + double value = 2; + + // The minimum acceptable value for the metric if specified + google.protobuf.DoubleValue min_value = 3; + + // The maximum acceptable value for the metric if specified + google.protobuf.DoubleValue max_value = 4; +} + +// Each unit test or benchmark in a test or benchmark run provides +// some set of information. Here we provide some reasonable keys +// one would expect to see, with optional key/value pairs for things +// we haven't considered. +// +// This BenchmarkEntry should be emitted by each unit test or benchmark +// reporter. +message BenchmarkEntry { + // The name of the specific benchmark or test + // (e.g. 
BM_AdjustContrast_gpu_B_W_H) + string name = 1; + + // If a benchmark, how many iterations it was run for + int64 iters = 2; + + // Total cpu time used for all iterations (in seconds) + double cpu_time = 3; + + // Total wall time used for all iterations (in seconds) + double wall_time = 4; + + // Throughput (in MB/s) + double throughput = 5; + + // Generic map from result key to value. + map extras = 6; + + // Metric name, value and expected range. This can include accuracy metrics + // typically used to determine whether the accuracy test has passed + repeated MetricEntry metrics = 7; +}; + +message BenchmarkEntries { + repeated BenchmarkEntry entry = 1; +} + +message BuildConfiguration { + string mode = 1; // opt, dbg, etc + repeated string cc_flags = 2; // CC compiler flags, if known + repeated string opts = 3; // Bazel compilation options, if known +}; + +message CommitId { + oneof kind { + // Submitted changelist. + int64 changelist = 1; + string hash = 2; + } + // Hash of intermediate change between hash/changelist and what was tested. + // Not used if the build is from a commit without modifications. + string snapshot = 3; + // Changelist tested if the change list is not already submitted. + int64 pending_changelist = 4; +}; + +message CPUInfo { + int64 num_cores = 1; + + int64 num_cores_allowed = 2; + + // How fast are these cpus? + double mhz_per_cpu = 3; + + // Additional cpu information. For example, + // Intel Ivybridge with HyperThreading (24 cores) dL1:32KB dL2:256KB dL3:30MB + string cpu_info = 4; + + // What kind of cpu scaling is enabled on the host. + // Examples include "performance", "ondemand", "conservative", "mixed". + string cpu_governor = 5; + + // Cache sizes (in bytes), e.g. "L2": 262144 (for 256KB) + map cache_size = 6; +}; + +message MemoryInfo { + int64 total = 1; // Total virtual memory in bytes + int64 available = 2; // Immediately available memory in bytes +} + +message GPUInfo { + string model = 1; // e.g. 
"Tesla K40c" + string uuid = 2; // Final entry in output of "nvidia-smi -L" + string bus_id = 3; // e.g. "0000:04:00.0" +}; + +message PlatformInfo { + string bits = 1; // e.g. '64bit' + string linkage = 2; // e.g. 'ELF' + string machine = 3; // e.g. 'i386' + string release = 4; // e.g. '3.13.0-76-generic' + string system = 5; // e.g. 'Linux' + string version = 6; // e.g. '#120-Ubuntu SMP Mon Jan 18 15:59:10 UTC 2016' +}; + +message AvailableDeviceInfo { // Matches DeviceAttributes + string name = 1; // Device name. + string type = 2; // Device type, e.g. 'CPU' or 'GPU'. + int64 memory_limit = 3; // Memory capacity in bytes. + string physical_description = 4; // The physical description of this device. +}; + +message MachineConfiguration { + // Host name of machine that ran the benchmark. + string hostname = 1; + + // Unique serial number of the machine. + string serial_identifier = 7; + + // Additional platform information. + PlatformInfo platform_info = 2; + + // CPU Information. + CPUInfo cpu_info = 3; + + // Other devices that are attached and relevant (e.g. GPUInfo). + repeated google.protobuf.Any device_info = 4; + + // Devices accessible to the test (e.g. as given by list_local_devices). + repeated AvailableDeviceInfo available_device_info = 5; + + MemoryInfo memory_info = 6; +}; + +// Run-specific items such as arguments to the test / benchmark. +message RunConfiguration { + repeated string argument = 1; +} + +// The output of one benchmark / test run. Each run contains a list of +// tests or benchmarks, stored as BenchmarkEntry messages. +// +// This message should be emitted by the reporter (which runs the +// test / BM in a subprocess and then reads the emitted BenchmarkEntry messages; +// usually from a serialized json file, finally collecting them along +// with additional information about the test run. 
+message TestResults { + // The target of the run, e.g.: + // //tensorflow/core:kernels_adjust_contrast_op_benchmark_test + string target = 1; + + // The list of tests or benchmarks in this run. + BenchmarkEntries entries = 2; + + // The configuration of the build (compiled opt? with cuda? any copts?) + BuildConfiguration build_configuration = 3; + + // The commit id (git hash or changelist) + CommitId commit_id = 4; + + // The time the run started (in seconds of UTC time since Unix epoch) + int64 start_time = 5; + + // The amount of time the total run took (wall time in seconds) + double run_time = 6; + + // Machine-specific parameters (Platform and CPU info) + MachineConfiguration machine_configuration = 7; + + // Run-specific parameters (arguments, etc) + RunConfiguration run_configuration = 8; + + // Benchmark target identifier. + string name = 9; + + // The type of benchmark. + enum BenchmarkType { + UNKNOWN = 0; // Fallback for protos written before Type was introduced. + CPP_MICROBENCHMARK = 1; + PYTHON_BENCHMARK = 2; + ANDROID_BENCHMARK = 3; + } + BenchmarkType benchmark_type = 10; + + // Used for differentiating between continuous and debug builds. + // Must be one of: + // * cbuild: results from continuous build. + // * presubmit: results from oneshot requests. + // * culprit: results from culprit finder rerun. + string run_mode = 11; + + // TensorFlow version this benchmark runs against. + // This can be either set to full version or just the major version. + string tf_version = 12; +}; diff --git a/executor/proto/tensorflow/lite/toco/model_flags.proto b/executor/proto/tensorflow/lite/toco/model_flags.proto new file mode 100644 index 0000000000..dfc425073f --- /dev/null +++ b/executor/proto/tensorflow/lite/toco/model_flags.proto @@ -0,0 +1,184 @@ +// Copyright 2017 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +syntax = "proto2"; +import "tensorflow/lite/toco/types.proto"; + +package toco; + +message InputArrayShape { + repeated int32 dims = 2; +} + +// Next ID to USE: 7. +message InputArray { + // Name of the input arrays, i.e. the arrays from which input activations + // will be read. + optional string name = 1; + + // Shape of the input. For many applications the dimensions are {batch, + // height, width, depth}. Often the batch is left "unspecified" by providing + // a value of -1. + // + // The last dimension is typically called 'depth' or 'channels'. For example, + // for an image model taking RGB images as input, this would have the value 3. + optional InputArrayShape shape = 6; + + // mean_value and std_value parameters control the interpretation of raw input + // activation values (elements of the input array) as real numbers. The + // mapping is given by: + // + // real_value = (raw_input_value - mean_value) / std_value + // + // In particular, the defaults (mean_value=0, std_value=1) yield + // real_value = raw_input_value. Often, non-default values are used in image + // models. For example, an image model taking uint8 image channel values as + // its raw inputs, in [0, 255] range, may use mean_value=128, std_value=128 to + // map them into the interval [-1, 1). + // + // Note: this matches exactly the meaning of mean_value and std_value in + // (TensorFlow via LegacyFedInput). + optional float mean_value = 3; + optional float std_value = 4 [default = 1.]; + + // Data type of the input. 
+ // + // In many graphs, the input arrays already have defined data types, + // e.g. Placeholder nodes in a TensorFlow GraphDef have a dtype attribute. + // In those cases, it is not needed to specify this data_type flag. + // The purpose of this flag is only to define the data type of input + // arrays whose type isn't defined in the input graph file. For example, + // when specifying an arbitrary (not Placeholder) --input_array into + // a TensorFlow GraphDef. + // + // When this data_type is quantized (e.g. QUANTIZED_UINT8), the + // corresponding quantization parameters are the mean_value, std_value + // fields. + // + // It is also important to understand the nuance between this data_type + // flag and the inference_input_type in TocoFlags. The basic difference + // is that this data_type (like all ModelFlags) describes a property + // of the input graph, while inference_input_type (like all TocoFlags) + // describes an aspect of the toco transformation process and thus of + // the output file. The types of input arrays may be different between + // the input and output files if quantization or dequantization occurred. + // Such differences can only occur for real-number data i.e. only + // between FLOAT and quantized types (e.g. QUANTIZED_UINT8). + optional IODataType data_type = 5; +} + +message RnnState { + optional string state_array = 1; + optional string back_edge_source_array = 2; + optional bool discardable = 5; + // size allows to specify a 1-D shape for the RNN state array. + // Will be expanded with 1's to fit the model. + // TODO(benoitjacob): should allow a generic, explicit shape. + optional int32 size = 3; + optional int32 num_dims = 4; +} + +// An ArraysExtraInfo message stores a collection of additional Information +// about arrays in a model, complementing the information in the model itself. +// It is intentionally a separate message so that it may be serialized and +// passed separately from the model. See --arrays_extra_info_file. 
+// +// A typical use case is to manually specify MinMax for specific arrays in a +// model that does not already contain such MinMax information. +message ArraysExtraInfo { + message Entry { + // Next ID to use: 8. + optional string name = 1; + optional string name_regexp = 7; + optional double min = 2; + optional double max = 3; + optional IODataType data_type = 4; + optional InputArrayShape shape = 5; + optional float constant_float_value = 6; + } + repeated Entry entries = 1; +} + +// ModelFlags encodes properties of a model that, depending on the file +// format, may or may not be recorded in the model file. The purpose of +// representing these properties in ModelFlags is to allow passing them +// separately from the input model file, for instance as command-line +// parameters, so that we can offer a single uniform interface that can +// handle files from different input formats. +// +// For each of these properties, and each supported file format, we +// detail in comments below whether the property exists in the given file +// format. +// +// Obsolete flags that have been removed: +// optional int32 input_depth = 3; +// optional int32 input_width = 4; +// optional int32 input_height = 5; +// optional int32 batch = 6 [ default = 1]; +// optional float mean_value = 7; +// optional float std_value = 8 [default = 1.]; +// optional int32 input_dims = 11 [ default = 4]; +// repeated int32 input_shape = 13; +// +// Next ID to USE: 20. +message ModelFlags { + // Information about the input arrays, i.e. the arrays from which input + // activations will be read. + repeated InputArray input_arrays = 1; + + // Name of the output arrays, i.e. the arrays into which output activations + // will be written. + repeated string output_arrays = 2; + + // If true, the model accepts an arbitrary batch size. Mutually exclusive with + // the 'batch' field: at most one of these two fields can be set. 
+ optional bool variable_batch = 10; + + repeated RnnState rnn_states = 12; + + // Checks applied to the model, typically after toco's comprehensive + // graph transformations. + // Next ID to USE: 4. + message ModelCheck { + // Use the name of a type of operator to check its counts. + // Use "Total" for overall operator counts. + // Use "Arrays" for overall array counts. + optional string count_type = 1 [default = "None"]; + // A count of zero is a meaningful check, so negative used to mean disable. + optional int32 count_min = 2 [default = -1]; + // If count_max < count_min, then count_min is only allowed value. + optional int32 count_max = 3 [default = -1]; + } + repeated ModelCheck model_checks = 14; + + // If true, will allow passing inexistent arrays in --input_arrays + // and --output_arrays. This makes little sense, is only useful to + // more easily get graph visualizations. + optional bool allow_nonexistent_arrays = 16; + + // If true, will allow passing non-ascii-printable characters in + // --input_arrays and --output_arrays. By default (if false), only + // ascii printable characters are allowed, i.e. character codes + // ranging from 32 to 127. This is disallowed by default so as to + // catch common copy-and-paste issues where invisible unicode + // characters are unwittingly added to these strings. + optional bool allow_nonascii_arrays = 17; + + // If set, this ArraysExtraInfo allows to pass extra information about arrays + // not specified in the input model file, such as extra MinMax information. + optional ArraysExtraInfo arrays_extra_info = 18; + + // When set to false, toco will not change the input ranges and the output + // ranges of concat operator to the overlap of all input ranges. 
+ optional bool change_concat_input_ranges = 19 [default = true]; +} diff --git a/executor/proto/tensorflow/lite/toco/toco_flags.proto b/executor/proto/tensorflow/lite/toco/toco_flags.proto new file mode 100644 index 0000000000..8e3550ded1 --- /dev/null +++ b/executor/proto/tensorflow/lite/toco/toco_flags.proto @@ -0,0 +1,221 @@ +// Copyright 2017 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +syntax = "proto2"; + +package toco; + +import "tensorflow/lite/toco/types.proto"; + +// Supported I/O file formats. Some formats may be input-only or output-only. +enum FileFormat { + FILE_FORMAT_UNKNOWN = 0; + + // GraphDef, third_party/tensorflow/core/framework/graph.proto + TENSORFLOW_GRAPHDEF = 1; + + // Tensorflow's mobile inference model. + // third_party/tensorflow/contrib/tflite/schema.fbs + TFLITE = 2; + + // GraphViz + // Export-only. + GRAPHVIZ_DOT = 3; +} + +// TocoFlags encodes extra parameters that drive tooling operations, that +// are not normally encoded in model files and in general may not be thought +// of as properties of models, instead describing how models are to be +// processed in the context of the present tooling job. +// +// Next ID to use: 31. 
+message TocoFlags { + // Input file format + optional FileFormat input_format = 1; + + // Output file format + optional FileFormat output_format = 2; + + // Similar to inference_type, but allows to control specifically the + // quantization of input arrays, separately from other arrays. + // + // If not set, then the value of inference_type is implicitly used, i.e. + // by default input arrays are quantized like other arrays. + // + // Like inference_type, this only affects real-number arrays. By "real-number" + // we mean float arrays, and quantized arrays. This excludes plain + // integer arrays, strings arrays, and every other data type. + // + // The typical use for this flag is for vision models taking a bitmap + // as input, typically with uint8 channels, yet still requiring floating-point + // inference. For such image models, the uint8 input is quantized, i.e. + // the uint8 values are interpreted as real numbers, and the quantization + // parameters used for such input arrays are their mean_value, std_value + // parameters. + optional IODataType inference_input_type = 11; + + // Sets the type of real-number arrays in the output file, that is, controls + // the representation (quantization) of real numbers in the output file, + // except for input arrays, which are controlled by inference_input_type. + // + // NOTE: this flag only impacts real-number arrays. By "real-number" + // we mean float arrays, and quantized arrays. This excludes plain + // integer arrays, strings arrays, and every other data type. + // + // For real-number arrays, the impact of this flag is to allow the output + // file to choose a different real-numbers representation (quantization) + // from what the input file used. For any other types of arrays, changing + // the data type would not make sense. + // + // Specifically: + // - If FLOAT, then real-numbers arrays will be of type float in + // the output file. If they were quantized in the input file, then + // they get dequantized. 
+ // - If QUANTIZED_UINT8, then real-numbers arrays will be quantized + // as uint8 in the output file. If they were float in the input file, + // then they get quantized. + // - If not set, then all real-numbers arrays retain the same type in the + // output file as they have in the input file. + // + optional IODataType inference_type = 4; + + // default_ranges_min and default_ranges_max are helpers to experiment + // with quantization of models. Normally, quantization requires the input + // model to have (min, max) range information for every activations array. + // This is needed in order to know how to quantize arrays and still achieve + // satisfactory accuracy. However, in some circumstances one would just like + // to estimate the performance of quantized inference, without caring about + // accuracy. That is what default_ranges_min and default_ranges_max are for: + // when specified, they will be used as default (min, max) range boundaries + // for all activation arrays that lack (min, max) range information, thus + // allowing for quantization to proceed. + // + // It should be clear from the above explanation that these parameters are + // for experimentation purposes only and should not be used in production: + // they make it easy to quantize models, but the resulting quantized model + // will be inaccurate. + // + // These values only apply to arrays quantized with the kUint8 data type. + optional float default_ranges_min = 5; + optional float default_ranges_max = 6; + // Equivalent versions of default_ranges_min/_max for arrays quantized with + // the kInt16 data type. + optional float default_int16_ranges_min = 15; + optional float default_int16_ranges_max = 16; + + // Ignore and discard FakeQuant nodes. For instance, that can be used to + // generate plain float code without fake-quantization from a quantized + // graph. 
+ optional bool drop_fake_quant = 7; + + // Normally, FakeQuant nodes must be strict boundaries for graph + // transformations, in order to ensure that quantized inference has the + // exact same arithmetic behavior as quantized training --- which is the + // whole point of quantized training and of FakeQuant nodes in the first + // place. However, that entails subtle requirements on where exactly + // FakeQuant nodes must be placed in the graph. Some quantized graphs + // have FakeQuant nodes at unexpected locations, that prevent graph + // transformations that are necessary in order to generate inference + // code for these graphs. Such graphs should be fixed, but as a + // temporary work-around, setting this reorder_across_fake_quant flag + // allows toco to perform necessary graph transformations on them, + // at the cost of no longer faithfully matching inference and training + // arithmetic. + optional bool reorder_across_fake_quant = 8; + + // If true, allow TOCO to create TF Lite Custom operators for all the + // unsupported Tensorflow ops. + optional bool allow_custom_ops = 10; + + // Applies only to the case when the input format is TENSORFLOW_GRAPHDEF. + // If true, then control dependencies will be immediately dropped during + // import. + // If not set, the default behavior is as follows: + // - Default to false if the output format is TENSORFLOW_GRAPHDEF. + // - Default to true in all other cases. + optional bool drop_control_dependency = 12; + + // Disables transformations that fuse subgraphs such as known LSTMs (not all + // LSTMs are identified). + optional bool debug_disable_recurrent_cell_fusion = 13; + + // Uses the FakeQuantWithMinMaxArgs.num_bits attribute to adjust quantized + // array data types throughout the graph. The graph must be properly annotated + // with FakeQuant* ops on at least the edges and may contain additional ops on + // the interior of the graph to widen/narrow as desired. 
+ // + // Input and output array data types may change because of this propagation + // and users must be sure to query the final data_type values. + optional bool propagate_fake_quant_num_bits = 14; + + // Some fast uint8 GEMM kernels require uint8 weights to avoid the value 0. + // This flag allows nudging them to 1 to allow proceeding, with moderate + // inaccuracy. + optional bool allow_nudging_weights_to_use_fast_gemm_kernel = 17; + + // Minimum size of constant arrays to deduplicate; arrays smaller will not be + // deduplicated. + optional int64 dedupe_array_min_size_bytes = 18 [default = 64]; + + // Split the LSTM inputs from 5 tensors to 18 tensors for TFLite. + // Ignored if the output format is not TFLite. + optional bool split_tflite_lstm_inputs = 19 [default = true]; + + // Store weights as quantized weights followed by dequantize operations. + // Computation is still done in float, but reduces model size (at the cost of + // accuracy and latency). + // DEPRECATED: Please use post_training_quantize instead. + optional bool quantize_weights = 20 [default = false]; + + // Full filepath of folder to dump the graphs at various stages of processing + // GraphViz .dot files. Preferred over --output_format=GRAPHVIZ_DOT in order + // to keep the requirements of the output file. + optional string dump_graphviz_dir = 24; + + // Boolean indicating whether to dump the graph after every graph + // transformation. + optional bool dump_graphviz_include_video = 25; + + // Boolean indicating whether to quantize the weights of the converted float + // model. Model size will be reduced and there will be latency improvements + // (at the cost of accuracy). + optional bool post_training_quantize = 26 [default = false]; + + // This flag only works when converting to TensorFlow Lite format. + // When enabled, unsupported ops will be converted to select TensorFlow ops. + // TODO(ycling): Consider to rename the following 2 flags and don't call it + // "Flex". 
+ // `enable_select_tf_ops` should always be used with `allow_custom_ops`. + // WARNING: Experimental interface, subject to change + optional bool enable_select_tf_ops = 27 [default = false]; + + // This flag only works when converting to TensorFlow Lite format. + // When enabled, all TensorFlow ops will be converted to select TensorFlow + // ops. + // This will force `enable_select_tf_ops` to true. + // `force_select_tf_ops` should always be used with `enable_select_tf_ops`. + // WARNING: Experimental interface, subject to change + optional bool force_select_tf_ops = 28 [default = false]; + + // Boolean indicating whether to convert float32 constant buffers to + // float16. This is typically done to reduce model size. Delegates may also + // wish to implement kernels on reduced precision floats for performance + // gains. + optional bool quantize_to_float16 = 29 [default = false]; + + // Boolean flag indicating whether the converter should allow models with + // dynamic Tensor shape. When set to False, the converter will generate + // runtime memory offsets for activation Tensors (with 128 bits alignment) + // and error out on models with undetermined Tensor shape. (Default: True) + optional bool allow_dynamic_tensors = 30 [default = true]; +} diff --git a/executor/proto/tensorflow/lite/toco/types.proto b/executor/proto/tensorflow/lite/toco/types.proto new file mode 100644 index 0000000000..2c65551743 --- /dev/null +++ b/executor/proto/tensorflow/lite/toco/types.proto @@ -0,0 +1,52 @@ +// Copyright 2017 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +syntax = "proto2"; + +package toco; + +// IODataType describes the numeric data types of input and output arrays +// of a model. +enum IODataType { + IO_DATA_TYPE_UNKNOWN = 0; + + // Float32, not quantized + FLOAT = 1; + + // Uint8, quantized + QUANTIZED_UINT8 = 2; + + // Int32, not quantized + INT32 = 3; + + // Int64, not quantized + INT64 = 4; + + // String, not quantized + STRING = 5; + + // Int16, quantized + QUANTIZED_INT16 = 6; + + // Boolean + BOOL = 7; + + // Complex64, not quantized + COMPLEX64 = 8; + + // Int8, quantized based on QuantizationParameters in schema. + INT8 = 9; + + // Half precision float, not quantized. + FLOAT16 = 10; +} diff --git a/executor/proto/tensorflow/lite/tools/evaluation/proto/evaluation_config.proto b/executor/proto/tensorflow/lite/tools/evaluation/proto/evaluation_config.proto new file mode 100644 index 0000000000..f95892c8bc --- /dev/null +++ b/executor/proto/tensorflow/lite/tools/evaluation/proto/evaluation_config.proto @@ -0,0 +1,46 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto2"; + +package tflite.evaluation; + +import "tensorflow/lite/tools/evaluation/proto/evaluation_stages.proto"; + +option cc_enable_arenas = true; +option java_multiple_files = true; +option java_package = "tflite.evaluation"; + +// Contains parameters that define how an EvaluationStage will be executed. +// This would typically be validated only once during initialization, so should +// not contain any variables that change with each run. +// +// Next ID: 3 +message EvaluationStageConfig { + optional string name = 1; + + // Specification defining what this stage does, and any required parameters. + optional ProcessSpecification specification = 2; +} + +// Metrics returned from EvaluationStage.LatestMetrics() need not have all +// fields set. +message EvaluationStageMetrics { + // Total number of times the EvaluationStage is run. + optional int32 num_runs = 1; + + // Process-specific numbers such as latencies, accuracy, etc. + optional ProcessMetrics process_metrics = 2; +} diff --git a/executor/proto/tensorflow/lite/tools/evaluation/proto/evaluation_stages.proto b/executor/proto/tensorflow/lite/tools/evaluation/proto/evaluation_stages.proto new file mode 100644 index 0000000000..74ab8c2a71 --- /dev/null +++ b/executor/proto/tensorflow/lite/tools/evaluation/proto/evaluation_stages.proto @@ -0,0 +1,317 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +syntax = "proto2"; + +package tflite.evaluation; + +option cc_enable_arenas = true; +option java_multiple_files = true; +option java_package = "tflite.evaluation"; + +// Defines the functionality executed by an EvaluationStage. +// +// Next ID: 7 +message ProcessSpecification { + oneof params { + ImagePreprocessingParams image_preprocessing_params = 1; + TopkAccuracyEvalParams topk_accuracy_eval_params = 2; + TfliteInferenceParams tflite_inference_params = 3; + ImageClassificationParams image_classification_params = 4; + ObjectDetectionAveragePrecisionParams + object_detection_average_precision_params = 5; + ObjectDetectionParams object_detection_params = 6; + } +} + +// Latency numbers in microseconds, based on all EvaluationStage::Run() calls so +// far. +// +// Next ID: 7 +message LatencyMetrics { + // Latency for the last Run. + optional int64 last_us = 1; + // Maximum latency observed for any Run. + optional int64 max_us = 2; + // Minimum latency observed for any Run. + optional int64 min_us = 3; + // Sum of all Run latencies. + optional int64 sum_us = 4; + // Average latency across all Runs. + optional double avg_us = 5; + // Standard deviation for latency across all Runs. + optional int64 std_deviation_us = 6; +} + +// Statistics for an accuracy value over multiple runs of evaluation. +// +// Next ID: 5 +message AccuracyMetrics { + // Maximum value observed for any Run. + optional float max_value = 1; + // Minimum value observed for any Run. 
+ optional float min_value = 2; + // Average value across all Runs. + optional double avg_value = 3; + // Standard deviation across all Runs. + optional float std_deviation = 4; +} + +// Contains process-specific metrics, which may differ based on what an +// EvaluationStage does. +// +// Next ID: 8 +message ProcessMetrics { + optional LatencyMetrics total_latency = 1; + + oneof stage_metrics { + TopkAccuracyEvalMetrics topk_accuracy_metrics = 2; + TfliteInferenceMetrics tflite_inference_metrics = 3; + ImageClassificationMetrics image_classification_metrics = 4; + InferenceProfilerMetrics inference_profiler_metrics = 5; + ObjectDetectionAveragePrecisionMetrics + object_detection_average_precision_metrics = 6; + ObjectDetectionMetrics object_detection_metrics = 7; + } +} + +// Parameters that define how images are preprocessed. +// +// Next ID: 5 +message ImagePreprocessingParams { + // Required. + optional int32 image_height = 1; + // Required. + optional int32 image_width = 2; + // Same as tflite::TfLiteType. + optional int32 output_type = 3; + // Fraction for central-cropping. + // A central cropping-fraction of 0.875 is considered best for Inception + // models, hence the default value. See: + // https://github.com/tensorflow/tpu/blob/master/models/experimental/inception/inception_preprocessing.py#L296 + // Set to 0 to disable cropping. + optional float cropping_fraction = 4 [default = 0.875]; +} + +// Parameters that control TFLite inference. +// +// Next ID: 5 +message TfliteInferenceParams { + // Required + optional string model_file_path = 1; + + enum Delegate { + NONE = 0; + NNAPI = 1; + GPU = 2; + } + optional Delegate delegate = 2; + // Number of threads available to the TFLite Interpreter. + optional int32 num_threads = 3 [default = 1]; + + // Defines how many times the TFLite Interpreter is invoked for every input. + // This helps benchmark cases where extensive pre-processing might not be + // required for every input. 
+ optional int32 invocations_per_run = 4 [default = 1]; +} + +// Metrics specific to TFLite inference. +// +// Next ID: 2 +message TfliteInferenceMetrics { + // Number of times the interpreter is invoked. + optional int32 num_inferences = 1; +} + +// Parameters that define how top-K accuracy is evaluated. +// +// Next ID: 2 +message TopkAccuracyEvalParams { + // Required. + optional int32 k = 1; +} + +// Metrics from top-K accuracy evaluation. +// +// Next ID: 2 +message TopkAccuracyEvalMetrics { + // A repeated field of size |k| where the ith element denotes the fraction of + // samples for which the correct label was present in the top (i + 1) model + // outputs. + // For example, topk_accuracies(1) will contain the fraction of + // samples for which the model returned the correct label as the top first or + // second output. + repeated float topk_accuracies = 1; +} + +// Parameters that define how the Image Classification task is evaluated +// end-to-end. +// +// Next ID: 3 +message ImageClassificationParams { + // Required. + // TfLite model should have 1 input & 1 output tensor. + // Input shape: {1, image_height, image_width, 3} + // Output shape: {1, num_total_labels} + optional TfliteInferenceParams inference_params = 1; + + // Optional. + // If not set, accuracy evaluation is not performed. + optional TopkAccuracyEvalParams topk_accuracy_eval_params = 2; +} + +// Metrics from evaluation of the image classification task. +// +// Next ID: 5 +message ImageClassificationMetrics { + optional LatencyMetrics pre_processing_latency = 1; + optional LatencyMetrics inference_latency = 2; + optional TfliteInferenceMetrics inference_metrics = 3; + // Not set if topk_accuracy_eval_params was not populated in + // ImageClassificationParams. + optional TopkAccuracyEvalMetrics topk_accuracy_metrics = 4; +} + +// Metrics computed from comparing TFLite execution in two settings: +// 1. User-defined TfliteInferenceParams (The 'test' setting) +// 2. 
Default TfliteInferenceParams (The 'reference' setting) +// +// Next ID: 4 +message InferenceProfilerMetrics { + // Latency metrics from Single-thread CPU inference. + optional LatencyMetrics reference_latency = 1; + // Latency from TfliteInferenceParams under test. + optional LatencyMetrics test_latency = 2; + // For reference & test output vectors {R, T}, the error is computed as: + // Mean([Abs(R[i] - T[i]) for i in num_elements]) + // output_errors[v] : statistics for the error value of the vth output vector + // across all Runs. + repeated AccuracyMetrics output_errors = 3; +} + +// Proto containing information about all the objects (predicted or +// ground-truth) contained in an image. +// +// Next ID: 3 +message ObjectDetectionResult { + // One instance of an object detected in an image. + // Next ID: 4 + message ObjectInstance { + // Defines the bounding box for a detected object. + // Next ID: 5 + message NormalizedBoundingBox { + // All boundaries defined below are required. + // Each boundary value should be normalized with respect to the image + // dimensions. This helps evaluate detections independent of image size. + // For example, normalized_top = top_boundary / image_height. + optional float normalized_top = 1; + optional float normalized_bottom = 2; + optional float normalized_left = 3; + optional float normalized_right = 4; + } + + // Required. + optional int32 class_id = 1; + // Required + optional NormalizedBoundingBox bounding_box = 2; + // Value in (0, 1.0] denoting confidence in this prediction. + // Default value of 1.0 for ground-truth data. + optional float score = 3 [default = 1.0]; + } + + repeated ObjectInstance objects = 1; + // Required for ground-truth data, to compare against inference results. + optional string image_name = 2; +} + +// Proto containing ground-truth ObjectsSets for all images in a COCO validation +// set. 
+//
+// Next ID: 2
+message ObjectDetectionGroundTruth {
+  repeated ObjectDetectionResult detection_results = 1;
+}
+
+// Parameters that define how Average Precision is computed for Object Detection
+// task.
+// Refer for details: http://cocodataset.org/#detection-eval
+//
+// Next ID: 4
+message ObjectDetectionAveragePrecisionParams {
+  // Total object classes. The AP value returned for each IoU threshold is an
+  // average over all classes encountered in predicted/ground truth sets.
+  optional int32 num_classes = 1;
+  // A predicted box matches a ground truth box if and only if
+  // IoU between these two are larger than an IoU threshold.
+  // AP is computed for all relevant {IoU threshold, class} combinations and
+  // averaged to get mAP.
+  // If left empty, evaluation is done for all IoU thresholds in the range
+  // 0.5:0.05:0.95 (min:increment:max).
+  repeated float iou_thresholds = 2;
+  // AP is computed as the average of maximum precision at (1
+  // + num_recall_points) recall levels. E.g., if num_recall_points is 10,
+  // recall levels are 0., 0.1, 0.2, ..., 0.9, 1.0.
+  // Default: 100
+  optional int32 num_recall_points = 3 [default = 100];
+}
+
+// Average Precision metrics from Object Detection task.
+//
+// Next ID: 3
+message ObjectDetectionAveragePrecisionMetrics {
+  // Average Precision value for a particular IoU threshold.
+  // Next ID: 3
+  message AveragePrecision {
+    optional float iou_threshold = 1;
+    optional float average_precision = 2;
+  }
+
+  // One entry for each in
+  // ObjectDetectionAveragePrecisionParams::iou_thresholds, averaged over all
+  // classes.
+  repeated AveragePrecision individual_average_precisions = 1;
+  // Average of Average Precision across all IoU thresholds.
+  optional float overall_mean_average_precision = 2;
+}
+
+// Parameters that define how the Object Detection task is evaluated
+// end-to-end.
+//
+// Next ID: 4
+message ObjectDetectionParams {
+  // Required.
+  // Model's outputs should be same as a TFLite-compatible SSD model.
+  // Refer:
+  // https://www.tensorflow.org/lite/models/object_detection/overview#output
+  // TODO(b/133772912): Generalize support for other types of object detection
+  // models.
+  optional TfliteInferenceParams inference_params = 1;
+  // Optional. Used to match ground-truth categories with model output.
+  // SSD Mobilenet V1 Model trained on COCO assumes class 0 is background class
+  // in the label file and class labels start from 1 to number_of_classes+1.
+  // Therefore, default value is set as 1.
+  optional int32 class_offset = 2 [default = 1];
+  optional ObjectDetectionAveragePrecisionParams ap_params = 3;
+}
+
+// Metrics from evaluation of the object detection task.
+//
+// Next ID: 5
+message ObjectDetectionMetrics {
+  optional LatencyMetrics pre_processing_latency = 1;
+  optional LatencyMetrics inference_latency = 2;
+  optional TfliteInferenceMetrics inference_metrics = 3;
+  optional ObjectDetectionAveragePrecisionMetrics average_precision_metrics = 4;
+}
diff --git a/executor/proto/tensorflow/python/framework/cpp_shape_inference.proto b/executor/proto/tensorflow/python/framework/cpp_shape_inference.proto
new file mode 100644
index 0000000000..11199a9720
--- /dev/null
+++ b/executor/proto/tensorflow/python/framework/cpp_shape_inference.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+package tensorflow;
+option cc_enable_arenas = true;
+
+import "tensorflow/core/framework/types.proto";
+import "tensorflow/core/framework/tensor_shape.proto";
+
+message CppShapeInferenceResult {
+  message HandleShapeAndType {
+    TensorShapeProto shape = 1;
+    DataType dtype = 2;
+  }
+  message HandleData {
+    bool is_set = 1;
+
+    // Only valid if is_set is true.
+ repeated HandleShapeAndType shape_and_type = 2; + } + TensorShapeProto shape = 1; + + reserved 2; // was handle_shape + reserved 3; // was handle_dtype + HandleData handle_data = 4; +} + +message CppShapeInferenceInputsNeeded { + repeated int32 input_tensors_needed = 1; + repeated int32 input_tensors_as_shapes_needed = 2; +} diff --git a/executor/proto/tensorflow/python/kernel_tests/proto/test_example.proto b/executor/proto/tensorflow/python/kernel_tests/proto/test_example.proto new file mode 100644 index 0000000000..b1ce66de4f --- /dev/null +++ b/executor/proto/tensorflow/python/kernel_tests/proto/test_example.proto @@ -0,0 +1,181 @@ +// Test description and protos to work with it. + +syntax = "proto2"; + +import "tensorflow/core/framework/types.proto"; + +package tensorflow.contrib.proto; + +// A TestCase holds a proto and assertions about how it should decode. +message TestCase { + // Batches of primitive values. + repeated TestValue values = 1; + // The batch shapes. + repeated int32 shapes = 2; + // Expected sizes for each field. + repeated int32 sizes = 3; + // Expected values for each field. + repeated FieldSpec fields = 4; +}; + +// FieldSpec describes the expected output for a single field. +message FieldSpec { + optional string name = 1; + optional tensorflow.DataType dtype = 2; + optional TestValue value = 3; +}; + +// NOTE: This definition must be kept in sync with PackedTestValue. 
+message TestValue { + repeated double double_value = 1; + repeated float float_value = 2; + repeated int64 int64_value = 3; + repeated uint64 uint64_value = 4; + repeated int32 int32_value = 5; + repeated fixed64 fixed64_value = 6; + repeated fixed32 fixed32_value = 7; + repeated bool bool_value = 8; + repeated string string_value = 9; + repeated bytes bytes_value = 12; + repeated uint32 uint32_value = 13; + repeated sfixed32 sfixed32_value = 15; + repeated sfixed64 sfixed64_value = 16; + repeated sint32 sint32_value = 17; + repeated sint64 sint64_value = 18; + repeated PrimitiveValue message_value = 19; + + // Optional fields with explicitly-specified defaults. + optional double double_value_with_default = 20 [default = 1.0]; + optional float float_value_with_default = 21 [default = 2.0]; + optional int64 int64_value_with_default = 22 [default = 3]; + optional uint64 uint64_value_with_default = 23 [default = 4]; + optional int32 int32_value_with_default = 24 [default = 5]; + optional fixed64 fixed64_value_with_default = 25 [default = 6]; + optional fixed32 fixed32_value_with_default = 26 [default = 7]; + optional bool bool_value_with_default = 27 [default = true]; + optional string string_value_with_default = 28 [default = "a"]; + optional bytes bytes_value_with_default = 29 + [default = "a longer default string"]; + optional uint32 uint32_value_with_default = 30 [default = 9]; + optional sfixed32 sfixed32_value_with_default = 31 [default = 10]; + optional sfixed64 sfixed64_value_with_default = 32 [default = 11]; + optional sint32 sint32_value_with_default = 33 [default = 12]; + optional sint64 sint64_value_with_default = 34 [default = 13]; + + extensions 100 to 199; +} + +// A PackedTestValue looks exactly the same as a TestValue in the text format, +// but the binary serializion is different. We test the packed representations +// by loading the same test cases using this definition instead of TestValue. 
+// +// NOTE: This definition must be kept in sync with TestValue in every way except +// the packed=true declaration and the lack of extensions. +message PackedTestValue { + repeated double double_value = 1 [packed = true]; + repeated float float_value = 2 [packed = true]; + repeated int64 int64_value = 3 [packed = true]; + repeated uint64 uint64_value = 4 [packed = true]; + repeated int32 int32_value = 5 [packed = true]; + repeated fixed64 fixed64_value = 6 [packed = true]; + repeated fixed32 fixed32_value = 7 [packed = true]; + repeated bool bool_value = 8 [packed = true]; + repeated string string_value = 9; + repeated bytes bytes_value = 12; + repeated uint32 uint32_value = 13 [packed = true]; + repeated sfixed32 sfixed32_value = 15 [packed = true]; + repeated sfixed64 sfixed64_value = 16 [packed = true]; + repeated sint32 sint32_value = 17 [packed = true]; + repeated sint64 sint64_value = 18 [packed = true]; + repeated PrimitiveValue message_value = 19; + + optional double double_value_with_default = 20 [default = 1.0]; + optional float float_value_with_default = 21 [default = 2.0]; + optional int64 int64_value_with_default = 22 [default = 3]; + optional uint64 uint64_value_with_default = 23 [default = 4]; + optional int32 int32_value_with_default = 24 [default = 5]; + optional fixed64 fixed64_value_with_default = 25 [default = 6]; + optional fixed32 fixed32_value_with_default = 26 [default = 7]; + optional bool bool_value_with_default = 27 [default = true]; + optional string string_value_with_default = 28 [default = "a"]; + optional bytes bytes_value_with_default = 29 + [default = "a longer default string"]; + optional uint32 uint32_value_with_default = 30 [default = 9]; + optional sfixed32 sfixed32_value_with_default = 31 [default = 10]; + optional sfixed64 sfixed64_value_with_default = 32 [default = 11]; + optional sint32 sint32_value_with_default = 33 [default = 12]; + optional sint64 sint64_value_with_default = 34 [default = 13]; +} + +message 
PrimitiveValue { + optional double double_value = 1; + optional float float_value = 2; + optional int64 int64_value = 3; + optional uint64 uint64_value = 4; + optional int32 int32_value = 5; + optional fixed64 fixed64_value = 6; + optional fixed32 fixed32_value = 7; + optional bool bool_value = 8; + optional string string_value = 9; + optional bytes bytes_value = 12; + optional uint32 uint32_value = 13; + optional sfixed32 sfixed32_value = 15; + optional sfixed64 sfixed64_value = 16; + optional sint32 sint32_value = 17; + optional sint64 sint64_value = 18; +} + +// Message containing fields with field numbers higher than any field above. +// An instance of this message is prepended to each binary message in the test +// to exercise the code path that handles fields encoded out of order of field +// number. +message ExtraFields { + optional string string_value = 1776; + optional bool bool_value = 1777; +} + +extend TestValue { + repeated PrimitiveValue ext_value = 100; +} + +// The messages below are for yet-to-be created tests. 
+
+message EnumValue {
+  enum Color {
+    RED = 0;
+    ORANGE = 1;
+    YELLOW = 2;
+    GREEN = 3;
+    BLUE = 4;
+    INDIGO = 5;
+    VIOLET = 6;
+  };
+  optional Color enum_value = 14;
+  repeated Color repeated_enum_value = 15;
+}
+
+
+message InnerMessageValue {
+  optional float float_value = 2;
+  repeated bytes bytes_values = 8;
+}
+
+message MiddleMessageValue {
+  repeated int32 int32_values = 5;
+  optional InnerMessageValue message_value = 11;
+  optional uint32 uint32_value = 13;
+}
+
+message MessageValue {
+  optional double double_value = 1;
+  optional MiddleMessageValue message_value = 11;
+}
+
+message RepeatedMessageValue {
+  message NestedMessageValue {
+    optional float float_value = 2;
+    repeated bytes bytes_values = 8;
+  }
+
+  repeated NestedMessageValue message_values = 11;
+}
diff --git a/executor/proto/tensorflow/python/tpu/tensor_tracer.proto b/executor/proto/tensorflow/python/tpu/tensor_tracer.proto
new file mode 100644
index 0000000000..ad5392d65f
--- /dev/null
+++ b/executor/proto/tensorflow/python/tpu/tensor_tracer.proto
@@ -0,0 +1,74 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/graph.proto";
+
+// Tensor Tracer Report proto gives information about the trace including:
+// - TensorTracerConfig: version, device, num replicas, trace mode.
+// - Graphdef, e.g., list of operations, tensors
+// - TracedTensorDef:
+//   * Name of the tensor
+//   * Tracepoint name if provided.
+//   * Index of the tensor in the compact cache if traced.
+//   * Explanation for why the tensor is traced or not.
+message TensorTracerReport {
+  TensorTracerConfig config = 1;
+
+  // Tensorflow graph.
+  tensorflow.GraphDef graphdef = 2;
+
+  // A map from tensor name to its TracedTensorDef.
+  map<string, TracedTensorDef> tensordef = 3;
+
+  message TensorTracerConfig {
+    // Tensor tracer version, e.g. hostcall, outside compilation.
+    string version = 1;
+    // Traced device, CPU, TPU...
+    string device = 2;
+
+    // Trace mode, norm, summary, full-trace.
+ string trace_mode = 3; + + // Number of cores, e.g. TPU cores, in the system. + int32 num_cores = 4; + + // Number of hosts, e.g. compute nodes in the system. + int32 num_hosts = 5; + + // Keep submode as string for backward compatibility. + string submode = 6; + + // Keep num cores per host for backward compatibility. + int32 num_cores_per_host = 7; + + // Id of the included cores, if a subset of cores are traced. + repeated int32 included_cores = 8; + + // The names of the signatures corresponding to the cache indices. + repeated string signatures = 9; + } + + message TracedTensorDef { + // Name of the tensor as appears in tf graph. + string name = 1; + // Cache index of the tensor. This may be different than topological index. + int32 cache_index = 2; + // If trace points are provided, corresponding tracepoint name of the + // tensor. Trace points are placed on the edges (tensors) in the tensorflow + // graph, and they force tensor tracer to trace the corresponding tensor. + // Tracepoints can be added using the programatic interface + // tensor_tracer.tensor_tracepoint(tensor, trace_point_name) function. + // This will add a trace point with the given trace_point_name for the given + // tensor. If a trace_point is provided for the tensor, + // trace_point name will be used for the rest of the analysis instead of + // tensor names. One can use trace_point_name's to compare two models with + // arbitrary tensor names by providing the same trace point name for the + // tensors that are comparable. + string trace_point_name = 3; + // Whether the tensor is traced or not. + bool is_traced = 4; + // Detailed explanation why the tensor is traced or not. 
+ string explanation = 5; + } +} diff --git a/executor/proto/tensorflow/python/training/checkpoint_state.proto b/executor/proto/tensorflow/python/training/checkpoint_state.proto new file mode 100644 index 0000000000..704f7fdc88 --- /dev/null +++ b/executor/proto/tensorflow/python/training/checkpoint_state.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package tensorflow; +option cc_enable_arenas = true; + +// Protocol buffer representing the checkpoint state. +message CheckpointState { + // Path to the most-recent model checkpoint. + string model_checkpoint_path = 1; + + // Paths to all not-yet-deleted model checkpoints, sorted from oldest to + // newest. + // Note that the value of model_checkpoint_path should be the last item in + // this list. + repeated string all_model_checkpoint_paths = 2; + // Unix timestamps corresponding to all_model_checkpoint_paths, indicating + // when each checkpoint was created. + repeated double all_model_checkpoint_timestamps = 3; + // Unix timestamp indicating the creation time for the last preserved + // checkpoint. + double last_preserved_timestamp = 4; +} diff --git a/executor/proto/tensorflow/python/util/protobuf/compare_test.proto b/executor/proto/tensorflow/python/util/protobuf/compare_test.proto new file mode 100644 index 0000000000..445ba55406 --- /dev/null +++ b/executor/proto/tensorflow/python/util/protobuf/compare_test.proto @@ -0,0 +1,49 @@ +// Test messages used in compare_test.py. 
+syntax = "proto2"; + +package compare_test; +option cc_enable_arenas = true; + +enum Enum { + A = 0; + B = 1; + C = 2; +} + +message Small { + repeated string strings = 1; +}; + +message Medium { + repeated int32 int32s = 1; + repeated Small smalls = 2; + repeated group GroupA = 3 { + repeated group GroupB = 4 { + required string strings = 5; + } + } + repeated float floats = 6; +}; + +message Large { + optional string string_ = 1; + optional int64 int64_ = 2; + optional float float_ = 3; + optional bool bool_ = 4; + optional Enum enum_ = 5; + repeated int64 int64s = 6; + optional Medium medium = 7; + optional Small small = 8; + optional double double_ = 9; + optional WithMap with_map = 10; +}; + +message Labeled { + required int32 required = 1; + optional int32 optional = 2; +} + +message WithMap { + map value_message = 1; + map value_string = 2; +} diff --git a/executor/proto/tensorflow/serving/class_registration_test.pb.go b/executor/proto/tensorflow/serving/class_registration_test.pb.go new file mode 100644 index 0000000000..0cbdf3c12d --- /dev/null +++ b/executor/proto/tensorflow/serving/class_registration_test.pb.go @@ -0,0 +1,166 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/util/class_registration_test.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Config1 struct { + StringField string `protobuf:"bytes,1,opt,name=string_field,json=stringField,proto3" json:"string_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Config1) Reset() { *m = Config1{} } +func (m *Config1) String() string { return proto.CompactTextString(m) } +func (*Config1) ProtoMessage() {} +func (*Config1) Descriptor() ([]byte, []int) { + return fileDescriptor_2a1690d17e90f318, []int{0} +} + +func (m *Config1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Config1.Unmarshal(m, b) +} +func (m *Config1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Config1.Marshal(b, m, deterministic) +} +func (m *Config1) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config1.Merge(m, src) +} +func (m *Config1) XXX_Size() int { + return xxx_messageInfo_Config1.Size(m) +} +func (m *Config1) XXX_DiscardUnknown() { + xxx_messageInfo_Config1.DiscardUnknown(m) +} + +var xxx_messageInfo_Config1 proto.InternalMessageInfo + +func (m *Config1) GetStringField() string { + if m != nil { + return m.StringField + } + return "" +} + +type Config2 struct { + StringField string `protobuf:"bytes,1,opt,name=string_field,json=stringField,proto3" json:"string_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Config2) Reset() { *m = Config2{} } +func (m *Config2) String() string { return proto.CompactTextString(m) } +func (*Config2) ProtoMessage() {} +func (*Config2) Descriptor() ([]byte, []int) { + return fileDescriptor_2a1690d17e90f318, []int{1} +} + +func (m *Config2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Config2.Unmarshal(m, b) +} +func (m *Config2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_Config2.Marshal(b, m, deterministic) +} +func (m *Config2) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config2.Merge(m, src) +} +func (m *Config2) XXX_Size() int { + return xxx_messageInfo_Config2.Size(m) +} +func (m *Config2) XXX_DiscardUnknown() { + xxx_messageInfo_Config2.DiscardUnknown(m) +} + +var xxx_messageInfo_Config2 proto.InternalMessageInfo + +func (m *Config2) GetStringField() string { + if m != nil { + return m.StringField + } + return "" +} + +type MessageWithAny struct { + AnyField *any.Any `protobuf:"bytes,1,opt,name=any_field,json=anyField,proto3" json:"any_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageWithAny) Reset() { *m = MessageWithAny{} } +func (m *MessageWithAny) String() string { return proto.CompactTextString(m) } +func (*MessageWithAny) ProtoMessage() {} +func (*MessageWithAny) Descriptor() ([]byte, []int) { + return fileDescriptor_2a1690d17e90f318, []int{2} +} + +func (m *MessageWithAny) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageWithAny.Unmarshal(m, b) +} +func (m *MessageWithAny) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageWithAny.Marshal(b, m, deterministic) +} +func (m *MessageWithAny) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageWithAny.Merge(m, src) +} +func (m *MessageWithAny) XXX_Size() int { + return xxx_messageInfo_MessageWithAny.Size(m) +} +func (m *MessageWithAny) XXX_DiscardUnknown() { + xxx_messageInfo_MessageWithAny.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageWithAny proto.InternalMessageInfo + +func (m *MessageWithAny) GetAnyField() *any.Any { + if m != nil { + return m.AnyField + } + return nil +} + +func init() { + proto.RegisterType((*Config1)(nil), "tensorflow.serving.Config1") + proto.RegisterType((*Config2)(nil), "tensorflow.serving.Config2") + proto.RegisterType((*MessageWithAny)(nil), 
"tensorflow.serving.MessageWithAny") +} + +func init() { + proto.RegisterFile("tensorflow_serving/util/class_registration_test.proto", fileDescriptor_2a1690d17e90f318) +} + +var fileDescriptor_2a1690d17e90f318 = []byte{ + // 198 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x8e, 0x31, 0x4b, 0x04, 0x31, + 0x10, 0x46, 0xb9, 0x46, 0xbd, 0x9c, 0x58, 0x04, 0x0b, 0xb5, 0x52, 0x2b, 0x0b, 0x49, 0xb8, 0x13, + 0x7f, 0xc0, 0xb1, 0x60, 0x67, 0xb3, 0x8d, 0x65, 0xc8, 0xea, 0x24, 0x0e, 0x84, 0x19, 0xc9, 0xcc, + 0x2a, 0xf9, 0xf7, 0xe2, 0xae, 0xb2, 0x96, 0xd7, 0x3e, 0xde, 0xfb, 0xf8, 0xcc, 0xa3, 0x02, 0x09, + 0xd7, 0x54, 0xf8, 0x2b, 0x08, 0xd4, 0x4f, 0xa4, 0xec, 0x47, 0xc5, 0xe2, 0x5f, 0x4b, 0x14, 0x09, + 0x15, 0x32, 0x8a, 0xd6, 0xa8, 0xc8, 0x14, 0x14, 0x44, 0xdd, 0x47, 0x65, 0x65, 0x6b, 0x97, 0xcc, + 0xfd, 0x66, 0x57, 0x97, 0x99, 0x39, 0x17, 0xf0, 0x93, 0x31, 0x8c, 0xc9, 0x47, 0x6a, 0xb3, 0x7e, + 0x7b, 0x6f, 0x8e, 0x3b, 0xa6, 0x84, 0x79, 0x6b, 0x6f, 0xcc, 0xa9, 0x68, 0x45, 0xca, 0x21, 0x21, + 0x94, 0xb7, 0x8b, 0xd5, 0xf5, 0xea, 0x6e, 0xdd, 0x6f, 0x66, 0xf6, 0xf4, 0x83, 0x16, 0x7b, 0x77, + 0x88, 0xdd, 0x99, 0xb3, 0x67, 0x10, 0x89, 0x19, 0x5e, 0x50, 0xdf, 0xf7, 0xd4, 0xec, 0xd6, 0xac, + 0x23, 0xb5, 0x7f, 0xc5, 0x66, 0x77, 0xee, 0xe6, 0x73, 0xee, 0xef, 0x9c, 0xdb, 0x53, 0xeb, 0x4f, + 0x22, 0xb5, 0x69, 0x64, 0x38, 0x9a, 0xf8, 0xc3, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x42, 0xfa, + 0xee, 0xbf, 0x0f, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/classification.pb.go b/executor/proto/tensorflow/serving/classification.pb.go new file mode 100644 index 0000000000..b82416c10c --- /dev/null +++ b/executor/proto/tensorflow/serving/classification.pb.go @@ -0,0 +1,288 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow_serving/apis/classification.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A single class. +type Class struct { + // Label or name of the class. + Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` + // Score for this class (e.g., the probability the item belongs to this + // class). As per the proto3 default-value semantics, if the score is missing, + // it should be treated as 0. + Score float32 `protobuf:"fixed32,2,opt,name=score,proto3" json:"score,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Class) Reset() { *m = Class{} } +func (m *Class) String() string { return proto.CompactTextString(m) } +func (*Class) ProtoMessage() {} +func (*Class) Descriptor() ([]byte, []int) { + return fileDescriptor_ca7d76edb0ae62d6, []int{0} +} + +func (m *Class) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Class.Unmarshal(m, b) +} +func (m *Class) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Class.Marshal(b, m, deterministic) +} +func (m *Class) XXX_Merge(src proto.Message) { + xxx_messageInfo_Class.Merge(m, src) +} +func (m *Class) XXX_Size() int { + return xxx_messageInfo_Class.Size(m) +} +func (m *Class) XXX_DiscardUnknown() { + xxx_messageInfo_Class.DiscardUnknown(m) +} + +var xxx_messageInfo_Class proto.InternalMessageInfo + +func (m *Class) GetLabel() string { 
+ if m != nil { + return m.Label + } + return "" +} + +func (m *Class) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +// List of classes for a single item (tensorflow.Example). +type Classifications struct { + Classes []*Class `protobuf:"bytes,1,rep,name=classes,proto3" json:"classes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Classifications) Reset() { *m = Classifications{} } +func (m *Classifications) String() string { return proto.CompactTextString(m) } +func (*Classifications) ProtoMessage() {} +func (*Classifications) Descriptor() ([]byte, []int) { + return fileDescriptor_ca7d76edb0ae62d6, []int{1} +} + +func (m *Classifications) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Classifications.Unmarshal(m, b) +} +func (m *Classifications) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Classifications.Marshal(b, m, deterministic) +} +func (m *Classifications) XXX_Merge(src proto.Message) { + xxx_messageInfo_Classifications.Merge(m, src) +} +func (m *Classifications) XXX_Size() int { + return xxx_messageInfo_Classifications.Size(m) +} +func (m *Classifications) XXX_DiscardUnknown() { + xxx_messageInfo_Classifications.DiscardUnknown(m) +} + +var xxx_messageInfo_Classifications proto.InternalMessageInfo + +func (m *Classifications) GetClasses() []*Class { + if m != nil { + return m.Classes + } + return nil +} + +// Contains one result per input example, in the same order as the input in +// ClassificationRequest. 
+type ClassificationResult struct { + Classifications []*Classifications `protobuf:"bytes,1,rep,name=classifications,proto3" json:"classifications,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClassificationResult) Reset() { *m = ClassificationResult{} } +func (m *ClassificationResult) String() string { return proto.CompactTextString(m) } +func (*ClassificationResult) ProtoMessage() {} +func (*ClassificationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_ca7d76edb0ae62d6, []int{2} +} + +func (m *ClassificationResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClassificationResult.Unmarshal(m, b) +} +func (m *ClassificationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClassificationResult.Marshal(b, m, deterministic) +} +func (m *ClassificationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClassificationResult.Merge(m, src) +} +func (m *ClassificationResult) XXX_Size() int { + return xxx_messageInfo_ClassificationResult.Size(m) +} +func (m *ClassificationResult) XXX_DiscardUnknown() { + xxx_messageInfo_ClassificationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_ClassificationResult proto.InternalMessageInfo + +func (m *ClassificationResult) GetClassifications() []*Classifications { + if m != nil { + return m.Classifications + } + return nil +} + +type ClassificationRequest struct { + // Model Specification. If version is not specified, will use the latest + // (numerical) version. + ModelSpec *ModelSpec `protobuf:"bytes,1,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // Input data. 
+ Input *Input `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClassificationRequest) Reset() { *m = ClassificationRequest{} } +func (m *ClassificationRequest) String() string { return proto.CompactTextString(m) } +func (*ClassificationRequest) ProtoMessage() {} +func (*ClassificationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ca7d76edb0ae62d6, []int{3} +} + +func (m *ClassificationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClassificationRequest.Unmarshal(m, b) +} +func (m *ClassificationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClassificationRequest.Marshal(b, m, deterministic) +} +func (m *ClassificationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClassificationRequest.Merge(m, src) +} +func (m *ClassificationRequest) XXX_Size() int { + return xxx_messageInfo_ClassificationRequest.Size(m) +} +func (m *ClassificationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClassificationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClassificationRequest proto.InternalMessageInfo + +func (m *ClassificationRequest) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *ClassificationRequest) GetInput() *Input { + if m != nil { + return m.Input + } + return nil +} + +type ClassificationResponse struct { + // Effective Model Specification used for classification. + ModelSpec *ModelSpec `protobuf:"bytes,2,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // Result of the classification. 
+ Result *ClassificationResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClassificationResponse) Reset() { *m = ClassificationResponse{} } +func (m *ClassificationResponse) String() string { return proto.CompactTextString(m) } +func (*ClassificationResponse) ProtoMessage() {} +func (*ClassificationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ca7d76edb0ae62d6, []int{4} +} + +func (m *ClassificationResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClassificationResponse.Unmarshal(m, b) +} +func (m *ClassificationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClassificationResponse.Marshal(b, m, deterministic) +} +func (m *ClassificationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClassificationResponse.Merge(m, src) +} +func (m *ClassificationResponse) XXX_Size() int { + return xxx_messageInfo_ClassificationResponse.Size(m) +} +func (m *ClassificationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ClassificationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ClassificationResponse proto.InternalMessageInfo + +func (m *ClassificationResponse) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *ClassificationResponse) GetResult() *ClassificationResult { + if m != nil { + return m.Result + } + return nil +} + +func init() { + proto.RegisterType((*Class)(nil), "tensorflow.serving.Class") + proto.RegisterType((*Classifications)(nil), "tensorflow.serving.Classifications") + proto.RegisterType((*ClassificationResult)(nil), "tensorflow.serving.ClassificationResult") + proto.RegisterType((*ClassificationRequest)(nil), "tensorflow.serving.ClassificationRequest") + proto.RegisterType((*ClassificationResponse)(nil), "tensorflow.serving.ClassificationResponse") +} + +func init() 
{ + proto.RegisterFile("tensorflow_serving/apis/classification.proto", fileDescriptor_ca7d76edb0ae62d6) +} + +var fileDescriptor_ca7d76edb0ae62d6 = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x4a, 0xf3, 0x40, + 0x14, 0x85, 0x99, 0x96, 0xf6, 0xa7, 0xb7, 0x8b, 0xc2, 0xd0, 0x5f, 0xaa, 0x20, 0x94, 0x74, 0x93, + 0x85, 0x24, 0xd0, 0x6c, 0x5d, 0x88, 0x05, 0xc1, 0x45, 0x37, 0xe3, 0x03, 0x94, 0x34, 0xde, 0xca, + 0xc0, 0x34, 0x33, 0xe6, 0x4e, 0xf4, 0x0d, 0x7c, 0x06, 0x1f, 0xd5, 0xa5, 0x64, 0x26, 0xa1, 0x24, + 0x35, 0x88, 0xbb, 0xdc, 0xf0, 0xdd, 0x73, 0xcf, 0x39, 0x0c, 0xdc, 0x58, 0xcc, 0x49, 0x17, 0x07, + 0xa5, 0xdf, 0x77, 0x84, 0xc5, 0x9b, 0xcc, 0x5f, 0xe2, 0xd4, 0x48, 0x8a, 0x33, 0x95, 0x12, 0xc9, + 0x83, 0xcc, 0x52, 0x2b, 0x75, 0x1e, 0x99, 0x42, 0x5b, 0xcd, 0xf9, 0x89, 0x8e, 0x6a, 0xfa, 0x6a, + 0xd5, 0xa7, 0x20, 0x73, 0x53, 0x5a, 0xbf, 0xd8, 0x0f, 0x1d, 0xf5, 0x33, 0x2a, 0x0f, 0x05, 0x09, + 0x8c, 0x36, 0xd5, 0x55, 0x3e, 0x87, 0x91, 0x4a, 0xf7, 0xa8, 0x16, 0x6c, 0xc9, 0xc2, 0x89, 0xf0, + 0x43, 0xf5, 0x97, 0x32, 0x5d, 0xe0, 0x62, 0xb0, 0x64, 0xe1, 0x40, 0xf8, 0x21, 0x78, 0x80, 0xd9, + 0xa6, 0x65, 0x95, 0x78, 0x02, 0xff, 0x9c, 0x7b, 0xa4, 0x05, 0x5b, 0x0e, 0xc3, 0xe9, 0xfa, 0x32, + 0x3a, 0xf7, 0x1d, 0xb9, 0x2d, 0xd1, 0x90, 0x01, 0xc2, 0xbc, 0xad, 0x23, 0x90, 0x4a, 0x65, 0xf9, + 0x16, 0x66, 0xed, 0x2a, 0x1a, 0xd1, 0x55, 0xaf, 0xe8, 0x09, 0x15, 0xdd, 0xdd, 0xe0, 0x83, 0xc1, + 0xff, 0xee, 0x9d, 0xd7, 0x12, 0xc9, 0xf2, 0x5b, 0x00, 0x57, 0xc6, 0x8e, 0x0c, 0x66, 0x2e, 0xf9, + 0x74, 0x7d, 0xfd, 0xd3, 0x8d, 0x6d, 0x45, 0x3d, 0x19, 0xcc, 0xc4, 0xe4, 0xd8, 0x7c, 0xf2, 0x18, + 0x46, 0xae, 0x6f, 0x57, 0x4e, 0x4f, 0xe2, 0xc7, 0x0a, 0x10, 0x9e, 0x0b, 0x3e, 0x19, 0x5c, 0x9c, + 0x05, 0x36, 0x3a, 0x27, 0xec, 0x38, 0x19, 0xfc, 0xd1, 0xc9, 0x1d, 0x8c, 0x0b, 0x57, 0x5d, 0x9d, + 0x21, 0xfc, 0xbd, 0x27, 0x5f, 0xb5, 0xa8, 0xf7, 0xee, 0x87, 0x5f, 0x8c, 0xed, 0xc7, 0xee, 0x4d, + 0x24, 0xdf, 0x01, 0x00, 0x00, 
0xff, 0xff, 0xa9, 0x20, 0xe3, 0xfc, 0xa1, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/file_system_storage_path_source.pb.go b/executor/proto/tensorflow/serving/file_system_storage_path_source.pb.go new file mode 100644 index 0000000000..0131db20f0 --- /dev/null +++ b/executor/proto/tensorflow/serving/file_system_storage_path_source.pb.go @@ -0,0 +1,477 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/sources/storage_path/file_system_storage_path_source.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Config proto for FileSystemStoragePathSource. +type FileSystemStoragePathSourceConfig struct { + // The servables to monitor for new versions, and aspire. + Servables []*FileSystemStoragePathSourceConfig_ServableToMonitor `protobuf:"bytes,5,rep,name=servables,proto3" json:"servables,omitempty"` + // A single servable name/base_path pair to monitor. + // DEPRECATED: Use 'servables' instead. + // TODO(b/30898016): Stop using these fields, and ultimately remove them here. + ServableName string `protobuf:"bytes,1,opt,name=servable_name,json=servableName,proto3" json:"servable_name,omitempty"` // Deprecated: Do not use. + BasePath string `protobuf:"bytes,2,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` // Deprecated: Do not use. + // How long to wait between file-system polling to look for children of + // 'base_path', in seconds. 
+ // + // If set to zero, filesystem will be polled exactly once. If set to a + // negative value (for testing use only), polling will be entirely disabled. + FileSystemPollWaitSeconds int64 `protobuf:"varint,3,opt,name=file_system_poll_wait_seconds,json=fileSystemPollWaitSeconds,proto3" json:"file_system_poll_wait_seconds,omitempty"` + // If true, then FileSystemStoragePathSource::Create() and ::UpdateConfig() + // fail if, for any configured servables, the file system doesn't currently + // contain at least one version under the base path. + // (Otherwise, it will emit a warning and keep pinging the file system to + // check for a version to appear later.) + // DEPRECATED: Use 'servable_versions_always_present' instead, which includes + // this behavior. + // TODO(b/30898016): Remove 2019-10-31 or later. + FailIfZeroVersionsAtStartup bool `protobuf:"varint,4,opt,name=fail_if_zero_versions_at_startup,json=failIfZeroVersionsAtStartup,proto3" json:"fail_if_zero_versions_at_startup,omitempty"` // Deprecated: Do not use. + // If true, the servable is always expected to exist on the underlying + // filesystem. FileSystemStoragePathSource::Create() and ::UpdateConfig() will + // fail if, for any configured servables, the file system doesn't currently + // contain at least one version under the base path. In addition, if a polling + // loop find the base path empty, it will not unload existing servables. 
+ ServableVersionsAlwaysPresent bool `protobuf:"varint,6,opt,name=servable_versions_always_present,json=servableVersionsAlwaysPresent,proto3" json:"servable_versions_always_present,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileSystemStoragePathSourceConfig) Reset() { *m = FileSystemStoragePathSourceConfig{} } +func (m *FileSystemStoragePathSourceConfig) String() string { return proto.CompactTextString(m) } +func (*FileSystemStoragePathSourceConfig) ProtoMessage() {} +func (*FileSystemStoragePathSourceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_300b419ac6423181, []int{0} +} + +func (m *FileSystemStoragePathSourceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileSystemStoragePathSourceConfig.Unmarshal(m, b) +} +func (m *FileSystemStoragePathSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileSystemStoragePathSourceConfig.Marshal(b, m, deterministic) +} +func (m *FileSystemStoragePathSourceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSystemStoragePathSourceConfig.Merge(m, src) +} +func (m *FileSystemStoragePathSourceConfig) XXX_Size() int { + return xxx_messageInfo_FileSystemStoragePathSourceConfig.Size(m) +} +func (m *FileSystemStoragePathSourceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_FileSystemStoragePathSourceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSystemStoragePathSourceConfig proto.InternalMessageInfo + +func (m *FileSystemStoragePathSourceConfig) GetServables() []*FileSystemStoragePathSourceConfig_ServableToMonitor { + if m != nil { + return m.Servables + } + return nil +} + +// Deprecated: Do not use. +func (m *FileSystemStoragePathSourceConfig) GetServableName() string { + if m != nil { + return m.ServableName + } + return "" +} + +// Deprecated: Do not use. 
+func (m *FileSystemStoragePathSourceConfig) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *FileSystemStoragePathSourceConfig) GetFileSystemPollWaitSeconds() int64 { + if m != nil { + return m.FileSystemPollWaitSeconds + } + return 0 +} + +// Deprecated: Do not use. +func (m *FileSystemStoragePathSourceConfig) GetFailIfZeroVersionsAtStartup() bool { + if m != nil { + return m.FailIfZeroVersionsAtStartup + } + return false +} + +func (m *FileSystemStoragePathSourceConfig) GetServableVersionsAlwaysPresent() bool { + if m != nil { + return m.ServableVersionsAlwaysPresent + } + return false +} + +// A policy that dictates which version(s) of a servable should be served. +type FileSystemStoragePathSourceConfig_ServableVersionPolicy struct { + // Types that are valid to be assigned to PolicyChoice: + // *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest_ + // *FileSystemStoragePathSourceConfig_ServableVersionPolicy_All_ + // *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific_ + PolicyChoice isFileSystemStoragePathSourceConfig_ServableVersionPolicy_PolicyChoice `protobuf_oneof:"policy_choice"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) Reset() { + *m = FileSystemStoragePathSourceConfig_ServableVersionPolicy{} +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) String() string { + return proto.CompactTextString(m) +} +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy) ProtoMessage() {} +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_300b419ac6423181, []int{0, 0} +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy.Unmarshal(m, b) +} 
+func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy.Marshal(b, m, deterministic) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy.Merge(m, src) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) XXX_Size() int { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy.Size(m) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy proto.InternalMessageInfo + +type isFileSystemStoragePathSourceConfig_ServableVersionPolicy_PolicyChoice interface { + isFileSystemStoragePathSourceConfig_ServableVersionPolicy_PolicyChoice() +} + +type FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest_ struct { + Latest *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest `protobuf:"bytes,100,opt,name=latest,proto3,oneof"` +} + +type FileSystemStoragePathSourceConfig_ServableVersionPolicy_All_ struct { + All *FileSystemStoragePathSourceConfig_ServableVersionPolicy_All `protobuf:"bytes,101,opt,name=all,proto3,oneof"` +} + +type FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific_ struct { + Specific *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific `protobuf:"bytes,102,opt,name=specific,proto3,oneof"` +} + +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest_) isFileSystemStoragePathSourceConfig_ServableVersionPolicy_PolicyChoice() { +} + +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_All_) isFileSystemStoragePathSourceConfig_ServableVersionPolicy_PolicyChoice() { +} + +func 
(*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific_) isFileSystemStoragePathSourceConfig_ServableVersionPolicy_PolicyChoice() { +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) GetPolicyChoice() isFileSystemStoragePathSourceConfig_ServableVersionPolicy_PolicyChoice { + if m != nil { + return m.PolicyChoice + } + return nil +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) GetLatest() *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest { + if x, ok := m.GetPolicyChoice().(*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest_); ok { + return x.Latest + } + return nil +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) GetAll() *FileSystemStoragePathSourceConfig_ServableVersionPolicy_All { + if x, ok := m.GetPolicyChoice().(*FileSystemStoragePathSourceConfig_ServableVersionPolicy_All_); ok { + return x.All + } + return nil +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy) GetSpecific() *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific { + if x, ok := m.GetPolicyChoice().(*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific_); ok { + return x.Specific + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest_)(nil), + (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_All_)(nil), + (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific_)(nil), + } +} + +// Serve the latest versions (i.e. the ones with the highest version +// numbers), among those found on disk. +// +// This is the default policy, with the default number of versions as 1. +type FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest struct { + // Number of latest versions to serve. (The default is 1.) 
+ NumVersions uint32 `protobuf:"varint,1,opt,name=num_versions,json=numVersions,proto3" json:"num_versions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest) Reset() { + *m = FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest{} +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest) String() string { + return proto.CompactTextString(m) +} +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest) ProtoMessage() {} +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest) Descriptor() ([]byte, []int) { + return fileDescriptor_300b419ac6423181, []int{0, 0, 0} +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest.Unmarshal(m, b) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest.Marshal(b, m, deterministic) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest.Merge(m, src) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest) XXX_Size() int { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest.Size(m) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest) XXX_DiscardUnknown() { + xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest proto.InternalMessageInfo + +func (m 
*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest) GetNumVersions() uint32 { + if m != nil { + return m.NumVersions + } + return 0 +} + +// Serve all versions found on disk. +type FileSystemStoragePathSourceConfig_ServableVersionPolicy_All struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_All) Reset() { + *m = FileSystemStoragePathSourceConfig_ServableVersionPolicy_All{} +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_All) String() string { + return proto.CompactTextString(m) +} +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_All) ProtoMessage() {} +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_All) Descriptor() ([]byte, []int) { + return fileDescriptor_300b419ac6423181, []int{0, 0, 1} +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_All) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_All.Unmarshal(m, b) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_All) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_All.Marshal(b, m, deterministic) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_All) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_All.Merge(m, src) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_All) XXX_Size() int { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_All.Size(m) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_All) XXX_DiscardUnknown() { + xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_All.DiscardUnknown(m) +} + +var 
xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_All proto.InternalMessageInfo + +// Serve a specific version (or set of versions). +// +// This policy is useful for rolling back to a specific version, or for +// canarying a specific version while still serving a separate stable +// version. +type FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific struct { + // The version numbers to serve. + Versions []int64 `protobuf:"varint,1,rep,packed,name=versions,proto3" json:"versions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific) Reset() { + *m = FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific{} +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific) String() string { + return proto.CompactTextString(m) +} +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific) ProtoMessage() {} +func (*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific) Descriptor() ([]byte, []int) { + return fileDescriptor_300b419ac6423181, []int{0, 0, 2} +} + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific.Unmarshal(m, b) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific.Marshal(b, m, deterministic) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific.Merge(m, src) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific) XXX_Size() int { + return 
xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific.Size(m) +} +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific) XXX_DiscardUnknown() { + xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific proto.InternalMessageInfo + +func (m *FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific) GetVersions() []int64 { + if m != nil { + return m.Versions + } + return nil +} + +// A servable name and base path to look for versions of the servable. +type FileSystemStoragePathSourceConfig_ServableToMonitor struct { + // The servable name to supply in aspired-versions callback calls. Child + // paths of 'base_path' are considered to be versions of this servable. + ServableName string `protobuf:"bytes,1,opt,name=servable_name,json=servableName,proto3" json:"servable_name,omitempty"` + // The path to monitor, i.e. look for child paths of the form base_path/123. + BasePath string `protobuf:"bytes,2,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + // The policy to determines the number of versions of the servable to be + // served at the same time. 
+ ServableVersionPolicy *FileSystemStoragePathSourceConfig_ServableVersionPolicy `protobuf:"bytes,4,opt,name=servable_version_policy,json=servableVersionPolicy,proto3" json:"servable_version_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileSystemStoragePathSourceConfig_ServableToMonitor) Reset() { + *m = FileSystemStoragePathSourceConfig_ServableToMonitor{} +} +func (m *FileSystemStoragePathSourceConfig_ServableToMonitor) String() string { + return proto.CompactTextString(m) +} +func (*FileSystemStoragePathSourceConfig_ServableToMonitor) ProtoMessage() {} +func (*FileSystemStoragePathSourceConfig_ServableToMonitor) Descriptor() ([]byte, []int) { + return fileDescriptor_300b419ac6423181, []int{0, 1} +} + +func (m *FileSystemStoragePathSourceConfig_ServableToMonitor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableToMonitor.Unmarshal(m, b) +} +func (m *FileSystemStoragePathSourceConfig_ServableToMonitor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableToMonitor.Marshal(b, m, deterministic) +} +func (m *FileSystemStoragePathSourceConfig_ServableToMonitor) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableToMonitor.Merge(m, src) +} +func (m *FileSystemStoragePathSourceConfig_ServableToMonitor) XXX_Size() int { + return xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableToMonitor.Size(m) +} +func (m *FileSystemStoragePathSourceConfig_ServableToMonitor) XXX_DiscardUnknown() { + xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableToMonitor.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSystemStoragePathSourceConfig_ServableToMonitor proto.InternalMessageInfo + +func (m *FileSystemStoragePathSourceConfig_ServableToMonitor) GetServableName() string { + if m != nil { + return m.ServableName + 
} + return "" +} + +func (m *FileSystemStoragePathSourceConfig_ServableToMonitor) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *FileSystemStoragePathSourceConfig_ServableToMonitor) GetServableVersionPolicy() *FileSystemStoragePathSourceConfig_ServableVersionPolicy { + if m != nil { + return m.ServableVersionPolicy + } + return nil +} + +func init() { + proto.RegisterType((*FileSystemStoragePathSourceConfig)(nil), "tensorflow.serving.FileSystemStoragePathSourceConfig") + proto.RegisterType((*FileSystemStoragePathSourceConfig_ServableVersionPolicy)(nil), "tensorflow.serving.FileSystemStoragePathSourceConfig.ServableVersionPolicy") + proto.RegisterType((*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Latest)(nil), "tensorflow.serving.FileSystemStoragePathSourceConfig.ServableVersionPolicy.Latest") + proto.RegisterType((*FileSystemStoragePathSourceConfig_ServableVersionPolicy_All)(nil), "tensorflow.serving.FileSystemStoragePathSourceConfig.ServableVersionPolicy.All") + proto.RegisterType((*FileSystemStoragePathSourceConfig_ServableVersionPolicy_Specific)(nil), "tensorflow.serving.FileSystemStoragePathSourceConfig.ServableVersionPolicy.Specific") + proto.RegisterType((*FileSystemStoragePathSourceConfig_ServableToMonitor)(nil), "tensorflow.serving.FileSystemStoragePathSourceConfig.ServableToMonitor") +} + +func init() { + proto.RegisterFile("tensorflow_serving/sources/storage_path/file_system_storage_path_source.proto", fileDescriptor_300b419ac6423181) +} + +var fileDescriptor_300b419ac6423181 = []byte{ + // 527 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xd1, 0x6e, 0xd3, 0x3e, + 0x14, 0xc6, 0x9b, 0xa5, 0xab, 0x52, 0x77, 0xd5, 0xff, 0x8f, 0xa5, 0x89, 0x90, 0x69, 0x22, 0x03, + 0x09, 0x22, 0x21, 0xa5, 0x52, 0x79, 0x01, 0x3a, 0x24, 0xd6, 0x01, 0x83, 0xca, 0x99, 0x40, 0xe2, + 0xc6, 0x72, 0xb3, 0x93, 0xd6, 0x92, 0x1b, 0x47, 0xb6, 0xbb, 0xaa, 0xdc, 0xf2, 
0x78, 0xbc, 0x02, + 0x37, 0xbc, 0x09, 0x8a, 0xd3, 0xa6, 0xdd, 0x3a, 0x89, 0x0b, 0x7a, 0x69, 0xfb, 0x3b, 0xbf, 0x2f, + 0xc7, 0xe7, 0x73, 0xd0, 0x95, 0x81, 0x5c, 0x4b, 0x95, 0x09, 0xb9, 0xa0, 0x1a, 0xd4, 0x2d, 0xcf, + 0x27, 0x3d, 0x2d, 0xe7, 0x2a, 0x05, 0xdd, 0xd3, 0x46, 0x2a, 0x36, 0x01, 0x5a, 0x30, 0x33, 0xed, + 0x65, 0x5c, 0x00, 0xd5, 0x4b, 0x6d, 0x60, 0x46, 0xb7, 0x0f, 0x68, 0xa5, 0x8e, 0x0b, 0x25, 0x8d, + 0xc4, 0x78, 0x83, 0x8b, 0x57, 0xb8, 0x67, 0xbf, 0x3c, 0x74, 0xf6, 0x8e, 0x0b, 0x48, 0x6c, 0x71, + 0x52, 0xd5, 0x8e, 0x98, 0x99, 0x26, 0xb6, 0xf2, 0xad, 0xcc, 0x33, 0x3e, 0xc1, 0x80, 0xda, 0x65, + 0x01, 0x1b, 0x0b, 0xd0, 0xfe, 0x61, 0xe8, 0x46, 0x9d, 0xfe, 0x45, 0xbc, 0x4b, 0x8b, 0xff, 0x4a, + 0x8a, 0x93, 0x15, 0xe6, 0x5a, 0x5e, 0xc9, 0x9c, 0x1b, 0xa9, 0xc8, 0x86, 0x8c, 0x5f, 0xa2, 0xee, + 0x7a, 0x41, 0x73, 0x36, 0x03, 0xdf, 0x09, 0x9d, 0xa8, 0x7d, 0x7e, 0xe0, 0x3b, 0xe4, 0x68, 0x7d, + 0xf0, 0x89, 0xcd, 0x00, 0x3f, 0x45, 0xed, 0x31, 0xd3, 0x55, 0x8f, 0xfe, 0x41, 0x2d, 0xf2, 0xca, + 0xcd, 0xd2, 0x12, 0xbf, 0x41, 0xa7, 0xdb, 0x77, 0x52, 0x48, 0x21, 0xe8, 0x82, 0x71, 0x43, 0x35, + 0xa4, 0x32, 0xbf, 0xd1, 0xbe, 0x1b, 0x3a, 0x91, 0x4b, 0x9e, 0x64, 0xf5, 0x07, 0x8f, 0xa4, 0x10, + 0x5f, 0x19, 0x37, 0x49, 0x25, 0xc0, 0x97, 0x28, 0xcc, 0x18, 0x17, 0x94, 0x67, 0xf4, 0x3b, 0x28, + 0x49, 0x6f, 0x41, 0x69, 0x2e, 0x73, 0x4d, 0x99, 0xa1, 0xda, 0x30, 0x65, 0xe6, 0x85, 0xdf, 0x0c, + 0x9d, 0xc8, 0xb3, 0xce, 0x27, 0xa5, 0xf6, 0x32, 0xfb, 0x06, 0x4a, 0x7e, 0x59, 0x09, 0x07, 0x26, + 0xa9, 0x64, 0xf8, 0x02, 0x85, 0x75, 0x5b, 0x1b, 0x8c, 0x58, 0xb0, 0xa5, 0xa6, 0x85, 0x02, 0x0d, + 0xb9, 0xf1, 0x5b, 0x25, 0x8a, 0x9c, 0xae, 0x75, 0x35, 0xc4, 0xaa, 0x46, 0x95, 0x28, 0xf8, 0xe9, + 0xa2, 0xe3, 0xe4, 0xae, 0x62, 0x24, 0x05, 0x4f, 0x97, 0x58, 0xa0, 0x96, 0x60, 0x06, 0xb4, 0xf1, + 0x6f, 0x42, 0x27, 0xea, 0xf4, 0xc9, 0xbf, 0x4d, 0xe7, 0x0e, 0x3c, 0xfe, 0x68, 0xc9, 0xc3, 0x06, + 0x59, 0x79, 0xe0, 0x14, 0xb9, 0x4c, 0x08, 0x1f, 0xac, 0xd5, 0xe7, 0x7d, 0x5a, 0x0d, 0x84, 0x18, + 0x36, 0x48, 0x49, 
0xc7, 0x0a, 0x79, 0xba, 0x80, 0x94, 0x67, 0x3c, 0xf5, 0x33, 0xeb, 0x74, 0xbd, + 0x4f, 0xa7, 0x64, 0xc5, 0x1e, 0x36, 0x48, 0xed, 0x13, 0xbc, 0x42, 0xad, 0xaa, 0x59, 0x7c, 0x86, + 0x8e, 0xf2, 0xf9, 0xac, 0x1e, 0x97, 0x4d, 0x62, 0x97, 0x74, 0xf2, 0xf9, 0x6c, 0x3d, 0x9a, 0xe0, + 0x10, 0xb9, 0x03, 0x21, 0x82, 0x17, 0xc8, 0x5b, 0xb3, 0x70, 0x80, 0xbc, 0xad, 0x0a, 0x37, 0x72, + 0x49, 0xbd, 0x3e, 0xff, 0x0f, 0x75, 0x0b, 0x6b, 0x4d, 0xd3, 0xa9, 0xe4, 0x29, 0x04, 0xbf, 0x1d, + 0xf4, 0x68, 0xe7, 0x39, 0xe0, 0xe7, 0x0f, 0xbe, 0x81, 0x7b, 0xf9, 0x3f, 0xd9, 0xc9, 0xff, 0x56, + 0xf6, 0x7f, 0x38, 0xe8, 0xf1, 0xfd, 0xbc, 0xd1, 0xca, 0xda, 0x26, 0xb6, 0xd3, 0xff, 0xb0, 0xc7, + 0x8b, 0x24, 0xc7, 0xfa, 0xa1, 0xed, 0xf7, 0x4d, 0xcf, 0xfd, 0xbf, 0x39, 0x6e, 0xd9, 0x3f, 0xcf, + 0xeb, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x73, 0xc4, 0xd3, 0x40, 0xca, 0x04, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/get_model_metadata.pb.go b/executor/proto/tensorflow/serving/get_model_metadata.pb.go new file mode 100644 index 0000000000..1ee2f0c467 --- /dev/null +++ b/executor/proto/tensorflow/serving/get_model_metadata.pb.go @@ -0,0 +1,204 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/apis/get_model_metadata.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + protobuf "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Message returned for "signature_def" field. 
+type SignatureDefMap struct { + SignatureDef map[string]*protobuf.SignatureDef `protobuf:"bytes,1,rep,name=signature_def,json=signatureDef,proto3" json:"signature_def,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignatureDefMap) Reset() { *m = SignatureDefMap{} } +func (m *SignatureDefMap) String() string { return proto.CompactTextString(m) } +func (*SignatureDefMap) ProtoMessage() {} +func (*SignatureDefMap) Descriptor() ([]byte, []int) { + return fileDescriptor_4f6b64919204d6ed, []int{0} +} + +func (m *SignatureDefMap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignatureDefMap.Unmarshal(m, b) +} +func (m *SignatureDefMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignatureDefMap.Marshal(b, m, deterministic) +} +func (m *SignatureDefMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureDefMap.Merge(m, src) +} +func (m *SignatureDefMap) XXX_Size() int { + return xxx_messageInfo_SignatureDefMap.Size(m) +} +func (m *SignatureDefMap) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureDefMap.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureDefMap proto.InternalMessageInfo + +func (m *SignatureDefMap) GetSignatureDef() map[string]*protobuf.SignatureDef { + if m != nil { + return m.SignatureDef + } + return nil +} + +type GetModelMetadataRequest struct { + // Model Specification indicating which model we are querying for metadata. + // If version is not specified, will use the latest (numerical) version. + ModelSpec *ModelSpec `protobuf:"bytes,1,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // Metadata fields to get. Currently supported: "signature_def". 
+ MetadataField []string `protobuf:"bytes,2,rep,name=metadata_field,json=metadataField,proto3" json:"metadata_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModelMetadataRequest) Reset() { *m = GetModelMetadataRequest{} } +func (m *GetModelMetadataRequest) String() string { return proto.CompactTextString(m) } +func (*GetModelMetadataRequest) ProtoMessage() {} +func (*GetModelMetadataRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4f6b64919204d6ed, []int{1} +} + +func (m *GetModelMetadataRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModelMetadataRequest.Unmarshal(m, b) +} +func (m *GetModelMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModelMetadataRequest.Marshal(b, m, deterministic) +} +func (m *GetModelMetadataRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModelMetadataRequest.Merge(m, src) +} +func (m *GetModelMetadataRequest) XXX_Size() int { + return xxx_messageInfo_GetModelMetadataRequest.Size(m) +} +func (m *GetModelMetadataRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetModelMetadataRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModelMetadataRequest proto.InternalMessageInfo + +func (m *GetModelMetadataRequest) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *GetModelMetadataRequest) GetMetadataField() []string { + if m != nil { + return m.MetadataField + } + return nil +} + +type GetModelMetadataResponse struct { + // Model Specification indicating which model this metadata belongs to. + ModelSpec *ModelSpec `protobuf:"bytes,1,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // Map of metadata field name to metadata field. The options for metadata + // field name are listed in GetModelMetadataRequest. Currently supported: + // "signature_def". 
+ Metadata map[string]*any.Any `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModelMetadataResponse) Reset() { *m = GetModelMetadataResponse{} } +func (m *GetModelMetadataResponse) String() string { return proto.CompactTextString(m) } +func (*GetModelMetadataResponse) ProtoMessage() {} +func (*GetModelMetadataResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f6b64919204d6ed, []int{2} +} + +func (m *GetModelMetadataResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModelMetadataResponse.Unmarshal(m, b) +} +func (m *GetModelMetadataResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModelMetadataResponse.Marshal(b, m, deterministic) +} +func (m *GetModelMetadataResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModelMetadataResponse.Merge(m, src) +} +func (m *GetModelMetadataResponse) XXX_Size() int { + return xxx_messageInfo_GetModelMetadataResponse.Size(m) +} +func (m *GetModelMetadataResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetModelMetadataResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModelMetadataResponse proto.InternalMessageInfo + +func (m *GetModelMetadataResponse) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *GetModelMetadataResponse) GetMetadata() map[string]*any.Any { + if m != nil { + return m.Metadata + } + return nil +} + +func init() { + proto.RegisterType((*SignatureDefMap)(nil), "tensorflow.serving.SignatureDefMap") + proto.RegisterMapType((map[string]*protobuf.SignatureDef)(nil), "tensorflow.serving.SignatureDefMap.SignatureDefEntry") + proto.RegisterType((*GetModelMetadataRequest)(nil), "tensorflow.serving.GetModelMetadataRequest") + 
proto.RegisterType((*GetModelMetadataResponse)(nil), "tensorflow.serving.GetModelMetadataResponse") + proto.RegisterMapType((map[string]*any.Any)(nil), "tensorflow.serving.GetModelMetadataResponse.MetadataEntry") +} + +func init() { + proto.RegisterFile("tensorflow_serving/apis/get_model_metadata.proto", fileDescriptor_4f6b64919204d6ed) +} + +var fileDescriptor_4f6b64919204d6ed = []byte{ + // 376 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x52, 0x4f, 0x4b, 0xe3, 0x40, + 0x1c, 0x65, 0x12, 0x76, 0xd9, 0x4e, 0xb6, 0xbb, 0x1a, 0x04, 0x63, 0x40, 0x08, 0x15, 0x21, 0x7a, + 0x98, 0x48, 0x44, 0x90, 0xe2, 0x45, 0xf1, 0xcf, 0xa9, 0x07, 0x53, 0x10, 0xf4, 0x12, 0xa6, 0xcd, + 0x2f, 0x31, 0x98, 0x66, 0x62, 0x66, 0x52, 0xc9, 0xc5, 0x8b, 0x5f, 0xce, 0x8f, 0xe4, 0x51, 0x92, + 0x74, 0x6c, 0x6a, 0xdb, 0x93, 0xb7, 0x99, 0x37, 0xef, 0xcd, 0x7b, 0x33, 0xef, 0x87, 0x8f, 0x04, + 0xa4, 0x9c, 0xe5, 0x61, 0xc2, 0x5e, 0x7c, 0x0e, 0xf9, 0x34, 0x4e, 0x23, 0x87, 0x66, 0x31, 0x77, + 0x22, 0x10, 0xfe, 0x84, 0x05, 0x90, 0xf8, 0x13, 0x10, 0x34, 0xa0, 0x82, 0x92, 0x2c, 0x67, 0x82, + 0xe9, 0xfa, 0x5c, 0x41, 0x66, 0x0a, 0x73, 0x27, 0x62, 0x2c, 0x4a, 0xc0, 0xa9, 0x19, 0xa3, 0x22, + 0x74, 0x68, 0x5a, 0x36, 0x74, 0xf3, 0x60, 0x4e, 0x77, 0xc6, 0x2c, 0x6f, 0x71, 0xaa, 0x7b, 0xfd, + 0x28, 0xa7, 0xd9, 0xe3, 0x8c, 0xba, 0xb7, 0x2e, 0x4b, 0x9d, 0xa3, 0x21, 0xf5, 0xde, 0x11, 0xfe, + 0x3f, 0x8c, 0xa3, 0x94, 0x8a, 0x22, 0x87, 0x4b, 0x08, 0x07, 0x34, 0xd3, 0x1f, 0x70, 0x97, 0x4b, + 0xc8, 0x0f, 0x20, 0x34, 0x90, 0xa5, 0xda, 0x9a, 0x7b, 0x42, 0x96, 0xa3, 0x92, 0x6f, 0xda, 0x85, + 0xfd, 0x55, 0x2a, 0xf2, 0xd2, 0xfb, 0xcb, 0x5b, 0x90, 0x79, 0x8f, 0x37, 0x97, 0x28, 0xfa, 0x06, + 0x56, 0x9f, 0xa0, 0x34, 0x90, 0x85, 0xec, 0x8e, 0x57, 0x2d, 0x75, 0x82, 0x7f, 0x4d, 0x69, 0x52, + 0x80, 0xa1, 0x58, 0xc8, 0xd6, 0x5c, 0xa3, 0x6d, 0xdd, 0xd6, 0x7b, 0x0d, 0xad, 0xaf, 0x9c, 0xa2, + 0xde, 0x2b, 0xde, 0xbe, 0x01, 0x31, 0xa8, 0x1e, 0x37, 0x98, 0xfd, 0xb1, 0x07, 0xcf, 
0x05, 0x70, + 0xa1, 0x9f, 0x61, 0xdc, 0x7c, 0x3e, 0xcf, 0x60, 0x5c, 0xfb, 0x68, 0xee, 0xee, 0xaa, 0xe7, 0xd4, + 0xea, 0x61, 0x06, 0x63, 0xaf, 0x33, 0x91, 0x4b, 0x7d, 0x1f, 0xff, 0x93, 0xa5, 0xf9, 0x61, 0x0c, + 0x49, 0x60, 0x28, 0x96, 0x6a, 0x77, 0xbc, 0xae, 0x44, 0xaf, 0x2b, 0xb0, 0xf7, 0xa6, 0x60, 0x63, + 0x39, 0x00, 0xcf, 0x58, 0xca, 0xe1, 0x87, 0x09, 0xee, 0xf0, 0x1f, 0xe9, 0x55, 0x7b, 0x6b, 0x6e, + 0x7f, 0x95, 0x76, 0x9d, 0x3b, 0x91, 0x40, 0xd3, 0xc8, 0xd7, 0x5d, 0xe6, 0x2d, 0xee, 0x2e, 0x1c, + 0xad, 0x68, 0xe2, 0x70, 0xb1, 0x89, 0x2d, 0xd2, 0xcc, 0x26, 0x91, 0x73, 0x47, 0xce, 0xd3, 0xb2, + 0xd5, 0xc2, 0x85, 0xfa, 0x81, 0xd0, 0xe8, 0x77, 0x7d, 0x7a, 0xfc, 0x19, 0x00, 0x00, 0xff, 0xff, + 0xfe, 0xfe, 0x12, 0x0e, 0x0f, 0x03, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/get_model_status.pb.go b/executor/proto/tensorflow/serving/get_model_status.pb.go new file mode 100644 index 0000000000..0d543e0d64 --- /dev/null +++ b/executor/proto/tensorflow/serving/get_model_status.pb.go @@ -0,0 +1,257 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/apis/get_model_status.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// States that map to ManagerState enum in +// tensorflow_serving/core/servable_state.h +type ModelVersionStatus_State int32 + +const ( + // Default value. 
+ ModelVersionStatus_UNKNOWN ModelVersionStatus_State = 0 + // The manager is tracking this servable, but has not initiated any action + // pertaining to it. + ModelVersionStatus_START ModelVersionStatus_State = 10 + // The manager has decided to load this servable. In particular, checks + // around resource availability and other aspects have passed, and the + // manager is about to invoke the loader's Load() method. + ModelVersionStatus_LOADING ModelVersionStatus_State = 20 + // The manager has successfully loaded this servable and made it available + // for serving (i.e. GetServableHandle(id) will succeed). To avoid races, + // this state is not reported until *after* the servable is made + // available. + ModelVersionStatus_AVAILABLE ModelVersionStatus_State = 30 + // The manager has decided to make this servable unavailable, and unload + // it. To avoid races, this state is reported *before* the servable is + // made unavailable. + ModelVersionStatus_UNLOADING ModelVersionStatus_State = 40 + // This servable has reached the end of its journey in the manager. Either + // it loaded and ultimately unloaded successfully, or it hit an error at + // some point in its lifecycle. + ModelVersionStatus_END ModelVersionStatus_State = 50 +) + +var ModelVersionStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 10: "START", + 20: "LOADING", + 30: "AVAILABLE", + 40: "UNLOADING", + 50: "END", +} + +var ModelVersionStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "START": 10, + "LOADING": 20, + "AVAILABLE": 30, + "UNLOADING": 40, + "END": 50, +} + +func (x ModelVersionStatus_State) String() string { + return proto.EnumName(ModelVersionStatus_State_name, int32(x)) +} + +func (ModelVersionStatus_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1efd090da1a85b62, []int{1, 0} +} + +// GetModelStatusRequest contains a ModelSpec indicating the model for which +// to get status. +type GetModelStatusRequest struct { + // Model Specification. 
If version is not specified, information about all + // versions of the model will be returned. If a version is specified, the + // status of only that version will be returned. + ModelSpec *ModelSpec `protobuf:"bytes,1,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModelStatusRequest) Reset() { *m = GetModelStatusRequest{} } +func (m *GetModelStatusRequest) String() string { return proto.CompactTextString(m) } +func (*GetModelStatusRequest) ProtoMessage() {} +func (*GetModelStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1efd090da1a85b62, []int{0} +} + +func (m *GetModelStatusRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModelStatusRequest.Unmarshal(m, b) +} +func (m *GetModelStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModelStatusRequest.Marshal(b, m, deterministic) +} +func (m *GetModelStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModelStatusRequest.Merge(m, src) +} +func (m *GetModelStatusRequest) XXX_Size() int { + return xxx_messageInfo_GetModelStatusRequest.Size(m) +} +func (m *GetModelStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetModelStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModelStatusRequest proto.InternalMessageInfo + +func (m *GetModelStatusRequest) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +// Version number, state, and status for a single version of a model. +type ModelVersionStatus struct { + // Model version. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Model state. + State ModelVersionStatus_State `protobuf:"varint,2,opt,name=state,proto3,enum=tensorflow.serving.ModelVersionStatus_State" json:"state,omitempty"` + // Model status. 
+ Status *StatusProto `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ModelVersionStatus) Reset() { *m = ModelVersionStatus{} } +func (m *ModelVersionStatus) String() string { return proto.CompactTextString(m) } +func (*ModelVersionStatus) ProtoMessage() {} +func (*ModelVersionStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_1efd090da1a85b62, []int{1} +} + +func (m *ModelVersionStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ModelVersionStatus.Unmarshal(m, b) +} +func (m *ModelVersionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ModelVersionStatus.Marshal(b, m, deterministic) +} +func (m *ModelVersionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModelVersionStatus.Merge(m, src) +} +func (m *ModelVersionStatus) XXX_Size() int { + return xxx_messageInfo_ModelVersionStatus.Size(m) +} +func (m *ModelVersionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ModelVersionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ModelVersionStatus proto.InternalMessageInfo + +func (m *ModelVersionStatus) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *ModelVersionStatus) GetState() ModelVersionStatus_State { + if m != nil { + return m.State + } + return ModelVersionStatus_UNKNOWN +} + +func (m *ModelVersionStatus) GetStatus() *StatusProto { + if m != nil { + return m.Status + } + return nil +} + +// Response for ModelStatusRequest on successful run. +type GetModelStatusResponse struct { + // Version number and status information for applicable model version(s). 
+ ModelVersionStatus []*ModelVersionStatus `protobuf:"bytes,1,rep,name=model_version_status,proto3" json:"model_version_status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModelStatusResponse) Reset() { *m = GetModelStatusResponse{} } +func (m *GetModelStatusResponse) String() string { return proto.CompactTextString(m) } +func (*GetModelStatusResponse) ProtoMessage() {} +func (*GetModelStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1efd090da1a85b62, []int{2} +} + +func (m *GetModelStatusResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModelStatusResponse.Unmarshal(m, b) +} +func (m *GetModelStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModelStatusResponse.Marshal(b, m, deterministic) +} +func (m *GetModelStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModelStatusResponse.Merge(m, src) +} +func (m *GetModelStatusResponse) XXX_Size() int { + return xxx_messageInfo_GetModelStatusResponse.Size(m) +} +func (m *GetModelStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetModelStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModelStatusResponse proto.InternalMessageInfo + +func (m *GetModelStatusResponse) GetModelVersionStatus() []*ModelVersionStatus { + if m != nil { + return m.ModelVersionStatus + } + return nil +} + +func init() { + proto.RegisterEnum("tensorflow.serving.ModelVersionStatus_State", ModelVersionStatus_State_name, ModelVersionStatus_State_value) + proto.RegisterType((*GetModelStatusRequest)(nil), "tensorflow.serving.GetModelStatusRequest") + proto.RegisterType((*ModelVersionStatus)(nil), "tensorflow.serving.ModelVersionStatus") + proto.RegisterType((*GetModelStatusResponse)(nil), "tensorflow.serving.GetModelStatusResponse") +} + +func init() { + proto.RegisterFile("tensorflow_serving/apis/get_model_status.proto", 
fileDescriptor_1efd090da1a85b62) +} + +var fileDescriptor_1efd090da1a85b62 = []byte{ + // 339 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcd, 0x4b, 0xc3, 0x30, + 0x18, 0xc6, 0xed, 0xca, 0x36, 0xf6, 0x0e, 0xa5, 0x84, 0x29, 0x65, 0xa0, 0x8e, 0x2a, 0xd2, 0x83, + 0x74, 0x50, 0x0f, 0x5e, 0xbc, 0x74, 0x6c, 0x8c, 0xe1, 0xec, 0x24, 0xfb, 0x10, 0xbc, 0x8c, 0x39, + 0x5f, 0x47, 0x61, 0x6b, 0x6a, 0x93, 0xcd, 0xab, 0x7f, 0xb6, 0x47, 0x49, 0xd2, 0xe1, 0x57, 0x07, + 0x9e, 0xda, 0xbc, 0x79, 0x9e, 0x5f, 0x9e, 0x27, 0x01, 0x4f, 0x60, 0xcc, 0x59, 0xfa, 0xb2, 0x64, + 0x6f, 0x53, 0x8e, 0xe9, 0x26, 0x8a, 0x17, 0xcd, 0x59, 0x12, 0xf1, 0xe6, 0x02, 0xc5, 0x74, 0xc5, + 0x9e, 0x71, 0x39, 0xe5, 0x62, 0x26, 0xd6, 0xdc, 0x4b, 0x52, 0x26, 0x18, 0x21, 0x5f, 0x7a, 0x2f, + 0xd3, 0xd7, 0xcf, 0x76, 0x31, 0x94, 0x5f, 0x1b, 0xeb, 0xe7, 0x39, 0xa2, 0xb5, 0x88, 0x96, 0xcd, + 0xef, 0x78, 0x67, 0x0c, 0x87, 0x5d, 0x14, 0x77, 0xd2, 0x37, 0x54, 0x73, 0x8a, 0xaf, 0x6b, 0xe4, + 0x82, 0xdc, 0x00, 0x64, 0x69, 0x12, 0x9c, 0xdb, 0x46, 0xc3, 0x70, 0xab, 0xfe, 0xb1, 0xf7, 0x37, + 0x8c, 0xa7, 0xbd, 0x09, 0xce, 0x69, 0x65, 0xb5, 0xfd, 0x75, 0xde, 0x0b, 0x40, 0xd4, 0xc6, 0x04, + 0x53, 0x1e, 0xb1, 0x58, 0xb3, 0x89, 0x0d, 0xe5, 0x8d, 0x1e, 0x28, 0xa2, 0x49, 0xb7, 0x4b, 0xd2, + 0x82, 0xa2, 0xcc, 0x85, 0x76, 0xa1, 0x61, 0xb8, 0x07, 0xfe, 0xe5, 0xce, 0x93, 0x7e, 0x00, 0x3d, + 0xf9, 0x41, 0xaa, 0xad, 0xe4, 0x1a, 0x4a, 0xba, 0x9b, 0x6d, 0xaa, 0xb8, 0xa7, 0x79, 0x10, 0x6d, + 0xbc, 0x97, 0xe5, 0x69, 0x26, 0x77, 0x86, 0x50, 0x54, 0x20, 0x52, 0x85, 0xf2, 0x38, 0xbc, 0x0d, + 0x07, 0x0f, 0xa1, 0xb5, 0x47, 0x2a, 0x50, 0x1c, 0x8e, 0x02, 0x3a, 0xb2, 0x40, 0xce, 0xfb, 0x83, + 0xa0, 0xdd, 0x0b, 0xbb, 0x56, 0x8d, 0xec, 0x43, 0x25, 0x98, 0x04, 0xbd, 0x7e, 0xd0, 0xea, 0x77, + 0xac, 0x13, 0xb9, 0x1c, 0x87, 0xdb, 0x5d, 0x97, 0x94, 0xc1, 0xec, 0x84, 0x6d, 0xcb, 0x77, 0x04, + 0x1c, 0xfd, 0xbe, 0x59, 0x9e, 0xb0, 0x98, 0x23, 0x79, 0x84, 0x9a, 0xbe, 0xda, 0xac, 0x7c, 0xf6, + 
0xe0, 0xb6, 0xd1, 0x30, 0xdd, 0xaa, 0x7f, 0xf1, 0xbf, 0xea, 0x34, 0x97, 0xd1, 0x32, 0x3f, 0x0c, + 0xe3, 0xa9, 0xa4, 0xde, 0xf6, 0xea, 0x33, 0x00, 0x00, 0xff, 0xff, 0x04, 0x57, 0xa9, 0xc7, 0x6c, + 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/inference.pb.go b/executor/proto/tensorflow/serving/inference.pb.go new file mode 100644 index 0000000000..a721f9fbe7 --- /dev/null +++ b/executor/proto/tensorflow/serving/inference.pb.go @@ -0,0 +1,294 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/apis/inference.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Inference request such as classification, regression, etc... +type InferenceTask struct { + // Model Specification. If version is not specified, will use the latest + // (numerical) version. + // All ModelSpecs in a MultiInferenceRequest must access the same model name. + ModelSpec *ModelSpec `protobuf:"bytes,1,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // Signature's method_name. Should be one of the method names defined in + // third_party/tensorflow/python/saved_model/signature_constants.py. + // e.g. "tensorflow/serving/classify". 
+ MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InferenceTask) Reset() { *m = InferenceTask{} } +func (m *InferenceTask) String() string { return proto.CompactTextString(m) } +func (*InferenceTask) ProtoMessage() {} +func (*InferenceTask) Descriptor() ([]byte, []int) { + return fileDescriptor_4842740eef0fb1a6, []int{0} +} + +func (m *InferenceTask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InferenceTask.Unmarshal(m, b) +} +func (m *InferenceTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InferenceTask.Marshal(b, m, deterministic) +} +func (m *InferenceTask) XXX_Merge(src proto.Message) { + xxx_messageInfo_InferenceTask.Merge(m, src) +} +func (m *InferenceTask) XXX_Size() int { + return xxx_messageInfo_InferenceTask.Size(m) +} +func (m *InferenceTask) XXX_DiscardUnknown() { + xxx_messageInfo_InferenceTask.DiscardUnknown(m) +} + +var xxx_messageInfo_InferenceTask proto.InternalMessageInfo + +func (m *InferenceTask) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *InferenceTask) GetMethodName() string { + if m != nil { + return m.MethodName + } + return "" +} + +// Inference result, matches the type of request or is an error. 
+type InferenceResult struct { + ModelSpec *ModelSpec `protobuf:"bytes,1,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // Types that are valid to be assigned to Result: + // *InferenceResult_ClassificationResult + // *InferenceResult_RegressionResult + Result isInferenceResult_Result `protobuf_oneof:"result"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InferenceResult) Reset() { *m = InferenceResult{} } +func (m *InferenceResult) String() string { return proto.CompactTextString(m) } +func (*InferenceResult) ProtoMessage() {} +func (*InferenceResult) Descriptor() ([]byte, []int) { + return fileDescriptor_4842740eef0fb1a6, []int{1} +} + +func (m *InferenceResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InferenceResult.Unmarshal(m, b) +} +func (m *InferenceResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InferenceResult.Marshal(b, m, deterministic) +} +func (m *InferenceResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_InferenceResult.Merge(m, src) +} +func (m *InferenceResult) XXX_Size() int { + return xxx_messageInfo_InferenceResult.Size(m) +} +func (m *InferenceResult) XXX_DiscardUnknown() { + xxx_messageInfo_InferenceResult.DiscardUnknown(m) +} + +var xxx_messageInfo_InferenceResult proto.InternalMessageInfo + +func (m *InferenceResult) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +type isInferenceResult_Result interface { + isInferenceResult_Result() +} + +type InferenceResult_ClassificationResult struct { + ClassificationResult *ClassificationResult `protobuf:"bytes,2,opt,name=classification_result,json=classificationResult,proto3,oneof"` +} + +type InferenceResult_RegressionResult struct { + RegressionResult *RegressionResult `protobuf:"bytes,3,opt,name=regression_result,json=regressionResult,proto3,oneof"` +} + +func 
(*InferenceResult_ClassificationResult) isInferenceResult_Result() {} + +func (*InferenceResult_RegressionResult) isInferenceResult_Result() {} + +func (m *InferenceResult) GetResult() isInferenceResult_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *InferenceResult) GetClassificationResult() *ClassificationResult { + if x, ok := m.GetResult().(*InferenceResult_ClassificationResult); ok { + return x.ClassificationResult + } + return nil +} + +func (m *InferenceResult) GetRegressionResult() *RegressionResult { + if x, ok := m.GetResult().(*InferenceResult_RegressionResult); ok { + return x.RegressionResult + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*InferenceResult) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*InferenceResult_ClassificationResult)(nil), + (*InferenceResult_RegressionResult)(nil), + } +} + +// Inference request containing one or more requests. +type MultiInferenceRequest struct { + // Inference tasks. + Tasks []*InferenceTask `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` + // Input data. 
+ Input *Input `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MultiInferenceRequest) Reset() { *m = MultiInferenceRequest{} } +func (m *MultiInferenceRequest) String() string { return proto.CompactTextString(m) } +func (*MultiInferenceRequest) ProtoMessage() {} +func (*MultiInferenceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4842740eef0fb1a6, []int{2} +} + +func (m *MultiInferenceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MultiInferenceRequest.Unmarshal(m, b) +} +func (m *MultiInferenceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MultiInferenceRequest.Marshal(b, m, deterministic) +} +func (m *MultiInferenceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MultiInferenceRequest.Merge(m, src) +} +func (m *MultiInferenceRequest) XXX_Size() int { + return xxx_messageInfo_MultiInferenceRequest.Size(m) +} +func (m *MultiInferenceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MultiInferenceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MultiInferenceRequest proto.InternalMessageInfo + +func (m *MultiInferenceRequest) GetTasks() []*InferenceTask { + if m != nil { + return m.Tasks + } + return nil +} + +func (m *MultiInferenceRequest) GetInput() *Input { + if m != nil { + return m.Input + } + return nil +} + +// Inference request containing one or more responses. +type MultiInferenceResponse struct { + // List of results; one for each InferenceTask in the request, returned in the + // same order as the request. 
+ Results []*InferenceResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MultiInferenceResponse) Reset() { *m = MultiInferenceResponse{} } +func (m *MultiInferenceResponse) String() string { return proto.CompactTextString(m) } +func (*MultiInferenceResponse) ProtoMessage() {} +func (*MultiInferenceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4842740eef0fb1a6, []int{3} +} + +func (m *MultiInferenceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MultiInferenceResponse.Unmarshal(m, b) +} +func (m *MultiInferenceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MultiInferenceResponse.Marshal(b, m, deterministic) +} +func (m *MultiInferenceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MultiInferenceResponse.Merge(m, src) +} +func (m *MultiInferenceResponse) XXX_Size() int { + return xxx_messageInfo_MultiInferenceResponse.Size(m) +} +func (m *MultiInferenceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MultiInferenceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MultiInferenceResponse proto.InternalMessageInfo + +func (m *MultiInferenceResponse) GetResults() []*InferenceResult { + if m != nil { + return m.Results + } + return nil +} + +func init() { + proto.RegisterType((*InferenceTask)(nil), "tensorflow.serving.InferenceTask") + proto.RegisterType((*InferenceResult)(nil), "tensorflow.serving.InferenceResult") + proto.RegisterType((*MultiInferenceRequest)(nil), "tensorflow.serving.MultiInferenceRequest") + proto.RegisterType((*MultiInferenceResponse)(nil), "tensorflow.serving.MultiInferenceResponse") +} + +func init() { + proto.RegisterFile("tensorflow_serving/apis/inference.proto", fileDescriptor_4842740eef0fb1a6) +} + +var fileDescriptor_4842740eef0fb1a6 = []byte{ + // 362 bytes of a gzipped FileDescriptorProto + 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0xb1, 0x4e, 0xe3, 0x40, + 0x10, 0x86, 0xcf, 0x89, 0x92, 0xbb, 0x8c, 0x75, 0xba, 0xbb, 0xd5, 0xe5, 0x94, 0x8b, 0x84, 0x08, + 0x0e, 0x12, 0x2e, 0x90, 0x23, 0x85, 0x82, 0x06, 0x9a, 0xd0, 0x40, 0x11, 0x8a, 0x0d, 0x12, 0xa5, + 0x65, 0x9c, 0x49, 0xb0, 0x62, 0xef, 0x9a, 0x9d, 0x35, 0xd4, 0x3c, 0x01, 0xaf, 0x4b, 0x89, 0xf0, + 0xc6, 0x0e, 0x09, 0x0e, 0x14, 0x74, 0xd6, 0xe8, 0x9b, 0xef, 0x9f, 0x19, 0x2f, 0x1c, 0x68, 0x14, + 0x24, 0xd5, 0x2c, 0x96, 0x0f, 0x3e, 0xa1, 0xba, 0x8f, 0xc4, 0x7c, 0x10, 0xa4, 0x11, 0x0d, 0x22, + 0x31, 0x43, 0x85, 0x22, 0x44, 0x2f, 0x55, 0x52, 0x4b, 0xc6, 0x56, 0xa0, 0xb7, 0x04, 0xbb, 0x87, + 0xdb, 0x9a, 0xc3, 0x38, 0x20, 0x8a, 0x66, 0x51, 0x18, 0xe8, 0x48, 0x0a, 0x63, 0xe8, 0xf6, 0xb7, + 0x47, 0xa5, 0x99, 0xfe, 0x0c, 0x4a, 0xe4, 0x14, 0xe3, 0x25, 0xe4, 0x6e, 0x83, 0x14, 0xce, 0x15, + 0x12, 0x95, 0x99, 0x8e, 0x80, 0x9f, 0x17, 0xc5, 0x22, 0x57, 0x01, 0x2d, 0xd8, 0x09, 0x40, 0x6e, + 0xf2, 0x29, 0xc5, 0xb0, 0x63, 0xf5, 0x2c, 0xd7, 0x1e, 0xee, 0x78, 0xef, 0x77, 0xf3, 0xc6, 0xaf, + 0xd4, 0x24, 0xc5, 0x90, 0xb7, 0x92, 0xe2, 0x93, 0xed, 0x82, 0x9d, 0xa0, 0xbe, 0x95, 0x53, 0x5f, + 0x04, 0x09, 0x76, 0x6a, 0x3d, 0xcb, 0x6d, 0x71, 0x30, 0xa5, 0xcb, 0x20, 0x41, 0xe7, 0xa9, 0x06, + 0xbf, 0xca, 0x40, 0x8e, 0x94, 0xc5, 0xfa, 0x8b, 0x91, 0x3e, 0xb4, 0xd7, 0xaf, 0xe9, 0xab, 0x5c, + 0x9b, 0x87, 0xdb, 0x43, 0xb7, 0x4a, 0x74, 0xb6, 0xd6, 0x60, 0xc6, 0x38, 0xff, 0xc6, 0xff, 0x86, + 0x15, 0x75, 0x36, 0x81, 0x3f, 0xab, 0xb3, 0x15, 0xf2, 0x7a, 0x2e, 0xdf, 0xaf, 0x92, 0xf3, 0x12, + 0x2e, 0xc5, 0xbf, 0xd5, 0x46, 0x6d, 0xf4, 0x03, 0x9a, 0xc6, 0xe4, 0x3c, 0x5a, 0xd0, 0x1e, 0x67, + 0xb1, 0x8e, 0xde, 0x9c, 0xe5, 0x2e, 0x43, 0xd2, 0xec, 0x18, 0x1a, 0x3a, 0xa0, 0x05, 0x75, 0xac, + 0x5e, 0xdd, 0xb5, 0x87, 0x7b, 0x55, 0x61, 0x6b, 0x3f, 0x8f, 0x1b, 0x9e, 0x0d, 0xa0, 0x91, 0x3f, + 0x99, 0xe5, 0x09, 0xfe, 0x57, 0x37, 0xa6, 0x99, 0xe6, 0x86, 0x73, 0xae, 0xe1, 0xdf, 0xe6, 0x08, + 0x94, 0x4a, 0x41, 0xc8, 0x4e, 0xe1, 
0xbb, 0x99, 0xb3, 0x98, 0xa2, 0xff, 0xe1, 0x14, 0x66, 0x3b, + 0x5e, 0xf4, 0x8c, 0xea, 0xcf, 0x96, 0x75, 0xd3, 0xcc, 0x9f, 0xda, 0xd1, 0x4b, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xdf, 0x63, 0x5f, 0x61, 0x4b, 0x03, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/input.pb.go b/executor/proto/tensorflow/serving/input.pb.go new file mode 100644 index 0000000000..e6cc7a1ad3 --- /dev/null +++ b/executor/proto/tensorflow/serving/input.pb.go @@ -0,0 +1,274 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/apis/input.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + example "github.com/tensorflow/tensorflow/tensorflow/go/core/example" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Specifies one or more fully independent input Examples. 
+// See examples at: +// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/example.proto +type ExampleList struct { + Examples []*example.Example `protobuf:"bytes,1,rep,name=examples,proto3" json:"examples,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExampleList) Reset() { *m = ExampleList{} } +func (m *ExampleList) String() string { return proto.CompactTextString(m) } +func (*ExampleList) ProtoMessage() {} +func (*ExampleList) Descriptor() ([]byte, []int) { + return fileDescriptor_68144b991b8eca1f, []int{0} +} + +func (m *ExampleList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExampleList.Unmarshal(m, b) +} +func (m *ExampleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExampleList.Marshal(b, m, deterministic) +} +func (m *ExampleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExampleList.Merge(m, src) +} +func (m *ExampleList) XXX_Size() int { + return xxx_messageInfo_ExampleList.Size(m) +} +func (m *ExampleList) XXX_DiscardUnknown() { + xxx_messageInfo_ExampleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ExampleList proto.InternalMessageInfo + +func (m *ExampleList) GetExamples() []*example.Example { + if m != nil { + return m.Examples + } + return nil +} + +// Specifies one or more independent input Examples, with a common context +// Example. +// +// The common use case for context is to cleanly and optimally specify some +// features that are common across multiple examples. +// +// See example below with a search query as the context and multiple restaurants +// to perform some inference on. 
+// +// context: { +// features: { +// feature: { +// key : "query" +// value: { +// bytes_list: { +// value: [ "pizza" ] +// } +// } +// } +// } +// } +// examples: { +// features: { +// feature: { +// key : "cuisine" +// value: { +// bytes_list: { +// value: [ "Pizzeria" ] +// } +// } +// } +// } +// } +// examples: { +// features: { +// feature: { +// key : "cuisine" +// value: { +// bytes_list: { +// value: [ "Taqueria" ] +// } +// } +// } +// } +// } +// +// Implementations of ExampleListWithContext merge the context Example into each +// of the Examples. Note that feature keys must not be duplicated between the +// Examples and context Example, or the behavior is undefined. +// +// See also: +// tensorflow/core/example/example.proto +// https://developers.google.com/protocol-buffers/docs/proto3#maps +type ExampleListWithContext struct { + Examples []*example.Example `protobuf:"bytes,1,rep,name=examples,proto3" json:"examples,omitempty"` + Context *example.Example `protobuf:"bytes,2,opt,name=context,proto3" json:"context,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExampleListWithContext) Reset() { *m = ExampleListWithContext{} } +func (m *ExampleListWithContext) String() string { return proto.CompactTextString(m) } +func (*ExampleListWithContext) ProtoMessage() {} +func (*ExampleListWithContext) Descriptor() ([]byte, []int) { + return fileDescriptor_68144b991b8eca1f, []int{1} +} + +func (m *ExampleListWithContext) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExampleListWithContext.Unmarshal(m, b) +} +func (m *ExampleListWithContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExampleListWithContext.Marshal(b, m, deterministic) +} +func (m *ExampleListWithContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExampleListWithContext.Merge(m, src) +} +func (m *ExampleListWithContext) XXX_Size() int { + return 
xxx_messageInfo_ExampleListWithContext.Size(m) +} +func (m *ExampleListWithContext) XXX_DiscardUnknown() { + xxx_messageInfo_ExampleListWithContext.DiscardUnknown(m) +} + +var xxx_messageInfo_ExampleListWithContext proto.InternalMessageInfo + +func (m *ExampleListWithContext) GetExamples() []*example.Example { + if m != nil { + return m.Examples + } + return nil +} + +func (m *ExampleListWithContext) GetContext() *example.Example { + if m != nil { + return m.Context + } + return nil +} + +type Input struct { + // Types that are valid to be assigned to Kind: + // *Input_ExampleList + // *Input_ExampleListWithContext + Kind isInput_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Input) Reset() { *m = Input{} } +func (m *Input) String() string { return proto.CompactTextString(m) } +func (*Input) ProtoMessage() {} +func (*Input) Descriptor() ([]byte, []int) { + return fileDescriptor_68144b991b8eca1f, []int{2} +} + +func (m *Input) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Input.Unmarshal(m, b) +} +func (m *Input) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Input.Marshal(b, m, deterministic) +} +func (m *Input) XXX_Merge(src proto.Message) { + xxx_messageInfo_Input.Merge(m, src) +} +func (m *Input) XXX_Size() int { + return xxx_messageInfo_Input.Size(m) +} +func (m *Input) XXX_DiscardUnknown() { + xxx_messageInfo_Input.DiscardUnknown(m) +} + +var xxx_messageInfo_Input proto.InternalMessageInfo + +type isInput_Kind interface { + isInput_Kind() +} + +type Input_ExampleList struct { + ExampleList *ExampleList `protobuf:"bytes,1,opt,name=example_list,json=exampleList,proto3,oneof"` +} + +type Input_ExampleListWithContext struct { + ExampleListWithContext *ExampleListWithContext `protobuf:"bytes,2,opt,name=example_list_with_context,json=exampleListWithContext,proto3,oneof"` +} + +func (*Input_ExampleList) 
isInput_Kind() {} + +func (*Input_ExampleListWithContext) isInput_Kind() {} + +func (m *Input) GetKind() isInput_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Input) GetExampleList() *ExampleList { + if x, ok := m.GetKind().(*Input_ExampleList); ok { + return x.ExampleList + } + return nil +} + +func (m *Input) GetExampleListWithContext() *ExampleListWithContext { + if x, ok := m.GetKind().(*Input_ExampleListWithContext); ok { + return x.ExampleListWithContext + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Input) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Input_ExampleList)(nil), + (*Input_ExampleListWithContext)(nil), + } +} + +func init() { + proto.RegisterType((*ExampleList)(nil), "tensorflow.serving.ExampleList") + proto.RegisterType((*ExampleListWithContext)(nil), "tensorflow.serving.ExampleListWithContext") + proto.RegisterType((*Input)(nil), "tensorflow.serving.Input") +} + +func init() { + proto.RegisterFile("tensorflow_serving/apis/input.proto", fileDescriptor_68144b991b8eca1f) +} + +var fileDescriptor_68144b991b8eca1f = []byte{ + // 242 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x2e, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0x8f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0xcc, 0x4b, 0xd7, 0x4f, + 0x2c, 0xc8, 0x2c, 0xd6, 0xcf, 0xcc, 0x2b, 0x28, 0x2d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x12, 0x42, 0x28, 0xd2, 0x83, 0x2a, 0x92, 0x52, 0x45, 0x88, 0xe9, 0x27, 0xe7, 0x17, 0xa5, 0xea, + 0xa7, 0x56, 0x24, 0xe6, 0x16, 0xe4, 0xc0, 0x69, 0x88, 0x56, 0x25, 0x3b, 0x2e, 0x6e, 0x57, 0x88, + 0x80, 0x4f, 0x66, 0x71, 0x89, 0x90, 0x3e, 0x17, 0x07, 0x54, 0xbe, 0x58, 0x82, 0x51, 0x81, 0x59, + 0x83, 0xdb, 0x48, 0x58, 0x0f, 0xc9, 0x70, 0xa8, 0xd2, 0x20, 0xb8, 0x22, 0xa5, 0x0a, 0x2e, 0x31, + 0x24, 0xfd, 0xe1, 0x99, 0x25, 0x19, 0xce, 0xf9, 0x79, 0x25, 0xa9, 0x15, 0xa4, 0x1b, 0x25, 0xa4, + 0xcb, 0xc5, 
0x9e, 0x0c, 0xd1, 0x2b, 0xc1, 0xa4, 0xc0, 0x88, 0x4b, 0x3d, 0x4c, 0x8d, 0xd2, 0x31, + 0x46, 0x2e, 0x56, 0x4f, 0x50, 0x20, 0x08, 0x79, 0x70, 0xf1, 0x40, 0x0d, 0x89, 0xcf, 0xc9, 0x2c, + 0x2e, 0x91, 0x60, 0x04, 0xeb, 0x96, 0xd7, 0xc3, 0x0c, 0x15, 0x3d, 0x24, 0xb7, 0x3a, 0x31, 0x69, + 0x30, 0x7a, 0x30, 0x04, 0x71, 0xa7, 0x22, 0x79, 0x3f, 0x9b, 0x4b, 0x12, 0xd9, 0xa4, 0xf8, 0xf2, + 0xcc, 0x92, 0x8c, 0x78, 0x54, 0x47, 0x69, 0x11, 0x30, 0x16, 0x29, 0x08, 0xa0, 0x36, 0x88, 0xa5, + 0x62, 0x97, 0x65, 0xe3, 0x62, 0xc9, 0xce, 0xcc, 0x4b, 0x71, 0x62, 0xfe, 0xc1, 0xc8, 0x98, 0xc4, + 0x06, 0x8e, 0x0e, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x4d, 0x99, 0xf6, 0xf0, 0x01, + 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/log_collector_config.pb.go b/executor/proto/tensorflow/serving/log_collector_config.pb.go new file mode 100644 index 0000000000..033e21044a --- /dev/null +++ b/executor/proto/tensorflow/serving/log_collector_config.pb.go @@ -0,0 +1,92 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/config/log_collector_config.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type LogCollectorConfig struct { + // Identifies the type of the LogCollector we will use to collect these logs. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The prefix to use for the filenames of the logs. 
+ FilenamePrefix string `protobuf:"bytes,2,opt,name=filename_prefix,json=filenamePrefix,proto3" json:"filename_prefix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogCollectorConfig) Reset() { *m = LogCollectorConfig{} } +func (m *LogCollectorConfig) String() string { return proto.CompactTextString(m) } +func (*LogCollectorConfig) ProtoMessage() {} +func (*LogCollectorConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f4f82912e099f4ae, []int{0} +} + +func (m *LogCollectorConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogCollectorConfig.Unmarshal(m, b) +} +func (m *LogCollectorConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogCollectorConfig.Marshal(b, m, deterministic) +} +func (m *LogCollectorConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogCollectorConfig.Merge(m, src) +} +func (m *LogCollectorConfig) XXX_Size() int { + return xxx_messageInfo_LogCollectorConfig.Size(m) +} +func (m *LogCollectorConfig) XXX_DiscardUnknown() { + xxx_messageInfo_LogCollectorConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_LogCollectorConfig proto.InternalMessageInfo + +func (m *LogCollectorConfig) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *LogCollectorConfig) GetFilenamePrefix() string { + if m != nil { + return m.FilenamePrefix + } + return "" +} + +func init() { + proto.RegisterType((*LogCollectorConfig)(nil), "tensorflow.serving.LogCollectorConfig") +} + +func init() { + proto.RegisterFile("tensorflow_serving/config/log_collector_config.proto", fileDescriptor_f4f82912e099f4ae) +} + +var fileDescriptor_f4f82912e099f4ae = []byte{ + // 150 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x29, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0x8f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0xcc, 0x4b, 0xd7, 
0x4f, + 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xd7, 0xcf, 0xc9, 0x4f, 0x8f, 0x4f, 0xce, 0xcf, 0xc9, 0x49, 0x4d, + 0x2e, 0xc9, 0x2f, 0x8a, 0x87, 0x08, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x21, 0x74, + 0xe9, 0x41, 0x75, 0x29, 0x05, 0x72, 0x09, 0xf9, 0xe4, 0xa7, 0x3b, 0xc3, 0x34, 0x38, 0x83, 0xd5, + 0x0b, 0x09, 0x71, 0xb1, 0x94, 0x54, 0x16, 0xa4, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, + 0xd9, 0x42, 0xea, 0x5c, 0xfc, 0x69, 0x99, 0x39, 0xa9, 0x79, 0x89, 0xb9, 0xa9, 0xf1, 0x05, 0x45, + 0xa9, 0x69, 0x99, 0x15, 0x12, 0x4c, 0x60, 0x69, 0x3e, 0x98, 0x70, 0x00, 0x58, 0xd4, 0x89, 0xf9, + 0x07, 0x23, 0x63, 0x12, 0x1b, 0xd8, 0x4a, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x08, 0x43, + 0x4b, 0x67, 0xaa, 0x00, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/logging.pb.go b/executor/proto/tensorflow/serving/logging.pb.go new file mode 100644 index 0000000000..f46b2be83f --- /dev/null +++ b/executor/proto/tensorflow/serving/logging.pb.go @@ -0,0 +1,105 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/core/logging.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Metadata logged along with the request logs. 
+type LogMetadata struct { + ModelSpec *ModelSpec `protobuf:"bytes,1,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + SamplingConfig *SamplingConfig `protobuf:"bytes,2,opt,name=sampling_config,json=samplingConfig,proto3" json:"sampling_config,omitempty"` + // List of tags used to load the relevant MetaGraphDef from SavedModel. + SavedModelTags []string `protobuf:"bytes,3,rep,name=saved_model_tags,json=savedModelTags,proto3" json:"saved_model_tags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogMetadata) Reset() { *m = LogMetadata{} } +func (m *LogMetadata) String() string { return proto.CompactTextString(m) } +func (*LogMetadata) ProtoMessage() {} +func (*LogMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_b61adc125a8f9545, []int{0} +} + +func (m *LogMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogMetadata.Unmarshal(m, b) +} +func (m *LogMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogMetadata.Marshal(b, m, deterministic) +} +func (m *LogMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogMetadata.Merge(m, src) +} +func (m *LogMetadata) XXX_Size() int { + return xxx_messageInfo_LogMetadata.Size(m) +} +func (m *LogMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_LogMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_LogMetadata proto.InternalMessageInfo + +func (m *LogMetadata) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *LogMetadata) GetSamplingConfig() *SamplingConfig { + if m != nil { + return m.SamplingConfig + } + return nil +} + +func (m *LogMetadata) GetSavedModelTags() []string { + if m != nil { + return m.SavedModelTags + } + return nil +} + +func init() { + proto.RegisterType((*LogMetadata)(nil), "tensorflow.serving.LogMetadata") +} + +func init() { + 
proto.RegisterFile("tensorflow_serving/core/logging.proto", fileDescriptor_b61adc125a8f9545) +} + +var fileDescriptor_b61adc125a8f9545 = []byte{ + // 225 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0x41, 0x4b, 0xc3, 0x40, + 0x10, 0x85, 0x59, 0x03, 0x42, 0xb7, 0xa0, 0xb2, 0xa7, 0x50, 0x10, 0x4a, 0x45, 0xc8, 0x69, 0x03, + 0x7a, 0xf5, 0xa4, 0x47, 0xed, 0xa5, 0xf5, 0x1e, 0xc6, 0x64, 0x3a, 0x04, 0x92, 0xcc, 0xb2, 0xb3, + 0xc4, 0xff, 0xe7, 0xaf, 0xf2, 0x28, 0xdd, 0x6d, 0x09, 0xd2, 0xdc, 0x76, 0xe0, 0x7b, 0xdf, 0x7b, + 0xab, 0x1f, 0x03, 0x0e, 0xc2, 0xfe, 0xd0, 0xf1, 0x77, 0x25, 0xe8, 0xc7, 0x76, 0xa0, 0xb2, 0x66, + 0x8f, 0x65, 0xc7, 0x44, 0xed, 0x40, 0xd6, 0x79, 0x0e, 0x6c, 0xcc, 0x84, 0xd9, 0x13, 0xb6, 0x7a, + 0x98, 0x89, 0x82, 0x6b, 0xa5, 0xec, 0xb9, 0xc1, 0x2e, 0x05, 0x57, 0x76, 0xd6, 0x3f, 0x1c, 0x5a, + 0x3a, 0x37, 0x54, 0xe9, 0x4c, 0xfc, 0xe6, 0x47, 0xe9, 0xe5, 0x07, 0xd3, 0x16, 0x03, 0x34, 0x10, + 0xc0, 0xbc, 0x68, 0x1d, 0x75, 0x95, 0x38, 0xac, 0x73, 0xb5, 0x56, 0xc5, 0xf2, 0xe9, 0xde, 0x5e, + 0xae, 0xb1, 0xdb, 0x23, 0xb5, 0x77, 0x58, 0xef, 0x16, 0xfd, 0xf9, 0x69, 0xde, 0xf5, 0xad, 0x40, + 0xef, 0xba, 0xa9, 0x26, 0xbf, 0x8a, 0x8a, 0xcd, 0x9c, 0x62, 0x7f, 0x42, 0xdf, 0x22, 0xb9, 0xbb, + 0x91, 0x7f, 0xb7, 0x29, 0xf4, 0x9d, 0xc0, 0x88, 0x4d, 0x95, 0x06, 0x05, 0x20, 0xc9, 0xb3, 0x75, + 0x56, 0x2c, 0x8e, 0xe4, 0x88, 0x4d, 0x5c, 0xf0, 0x09, 0x24, 0xaf, 0xd9, 0xaf, 0x52, 0x5f, 0xd7, + 0xf1, 0x43, 0xcf, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xad, 0x15, 0x8a, 0xde, 0x62, 0x01, 0x00, + 0x00, +} diff --git a/executor/proto/tensorflow/serving/logging_config.pb.go b/executor/proto/tensorflow/serving/logging_config.pb.go new file mode 100644 index 0000000000..0388ebd6a8 --- /dev/null +++ b/executor/proto/tensorflow/serving/logging_config.pb.go @@ -0,0 +1,136 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow_serving/config/logging_config.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type SamplingConfig struct { + // Requests will be logged uniformly at random with this probability. Valid + // range: [0, 1.0]. + SamplingRate float64 `protobuf:"fixed64,1,opt,name=sampling_rate,json=samplingRate,proto3" json:"sampling_rate,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SamplingConfig) Reset() { *m = SamplingConfig{} } +func (m *SamplingConfig) String() string { return proto.CompactTextString(m) } +func (*SamplingConfig) ProtoMessage() {} +func (*SamplingConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7e467ec096fa84f9, []int{0} +} + +func (m *SamplingConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SamplingConfig.Unmarshal(m, b) +} +func (m *SamplingConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SamplingConfig.Marshal(b, m, deterministic) +} +func (m *SamplingConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_SamplingConfig.Merge(m, src) +} +func (m *SamplingConfig) XXX_Size() int { + return xxx_messageInfo_SamplingConfig.Size(m) +} +func (m *SamplingConfig) XXX_DiscardUnknown() { + xxx_messageInfo_SamplingConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_SamplingConfig proto.InternalMessageInfo + +func (m *SamplingConfig) GetSamplingRate() float64 { + if m 
!= nil { + return m.SamplingRate + } + return 0 +} + +// Configuration for logging query/responses. +type LoggingConfig struct { + LogCollectorConfig *LogCollectorConfig `protobuf:"bytes,1,opt,name=log_collector_config,json=logCollectorConfig,proto3" json:"log_collector_config,omitempty"` + SamplingConfig *SamplingConfig `protobuf:"bytes,2,opt,name=sampling_config,json=samplingConfig,proto3" json:"sampling_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoggingConfig) Reset() { *m = LoggingConfig{} } +func (m *LoggingConfig) String() string { return proto.CompactTextString(m) } +func (*LoggingConfig) ProtoMessage() {} +func (*LoggingConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7e467ec096fa84f9, []int{1} +} + +func (m *LoggingConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoggingConfig.Unmarshal(m, b) +} +func (m *LoggingConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoggingConfig.Marshal(b, m, deterministic) +} +func (m *LoggingConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoggingConfig.Merge(m, src) +} +func (m *LoggingConfig) XXX_Size() int { + return xxx_messageInfo_LoggingConfig.Size(m) +} +func (m *LoggingConfig) XXX_DiscardUnknown() { + xxx_messageInfo_LoggingConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_LoggingConfig proto.InternalMessageInfo + +func (m *LoggingConfig) GetLogCollectorConfig() *LogCollectorConfig { + if m != nil { + return m.LogCollectorConfig + } + return nil +} + +func (m *LoggingConfig) GetSamplingConfig() *SamplingConfig { + if m != nil { + return m.SamplingConfig + } + return nil +} + +func init() { + proto.RegisterType((*SamplingConfig)(nil), "tensorflow.serving.SamplingConfig") + proto.RegisterType((*LoggingConfig)(nil), "tensorflow.serving.LoggingConfig") +} + +func init() { + 
proto.RegisterFile("tensorflow_serving/config/logging_config.proto", fileDescriptor_7e467ec096fa84f9) +} + +var fileDescriptor_7e467ec096fa84f9 = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x2b, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0x8f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0xcc, 0x4b, 0xd7, 0x4f, + 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xd7, 0xcf, 0xc9, 0x4f, 0x4f, 0xcf, 0xcc, 0x4b, 0x8f, 0x87, 0x70, + 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x84, 0x10, 0xea, 0xf5, 0xa0, 0xea, 0xa5, 0x4c, 0xf0, + 0x9a, 0x11, 0x9f, 0x9c, 0x9f, 0x93, 0x93, 0x9a, 0x5c, 0x92, 0x5f, 0x84, 0x62, 0x92, 0x92, 0x29, + 0x17, 0x5f, 0x70, 0x62, 0x6e, 0x41, 0x4e, 0x66, 0x5e, 0xba, 0x33, 0x58, 0x5c, 0x48, 0x99, 0x8b, + 0xb7, 0x18, 0x2a, 0x12, 0x5f, 0x94, 0x58, 0x92, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x18, 0xc4, + 0x03, 0x13, 0x0c, 0x4a, 0x2c, 0x49, 0x55, 0xda, 0xc6, 0xc8, 0xc5, 0xeb, 0x03, 0x71, 0x19, 0x54, + 0x5b, 0x04, 0x97, 0x08, 0x36, 0x6b, 0xc0, 0xba, 0xb9, 0x8d, 0xd4, 0xf4, 0x30, 0x5d, 0xac, 0xe7, + 0x93, 0x9f, 0xee, 0x0c, 0x53, 0x0e, 0x31, 0x25, 0x48, 0x28, 0x07, 0x43, 0x4c, 0xc8, 0x9b, 0x8b, + 0x1f, 0xee, 0x20, 0xa8, 0xa1, 0x4c, 0x60, 0x43, 0x95, 0xb0, 0x19, 0x8a, 0xea, 0x9b, 0x20, 0xbe, + 0x62, 0x14, 0xbe, 0x13, 0xf3, 0x0f, 0x46, 0xc6, 0x24, 0x36, 0xb0, 0xdf, 0x8d, 0x01, 0x01, 0x00, + 0x00, 0xff, 0xff, 0x3f, 0xc0, 0xa7, 0xc5, 0x77, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/model.pb.go b/executor/proto/tensorflow/serving/model.pb.go new file mode 100644 index 0000000000..cf4b23bc03 --- /dev/null +++ b/executor/proto/tensorflow/serving/model.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow_serving/apis/model.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Metadata for an inference request such as the model name and version. +type ModelSpec struct { + // Required servable name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Optional choice of which version of the model to use. + // + // Recommended to be left unset in the common case. Should be specified only + // when there is a strong version consistency requirement. + // + // When left unspecified, the system will serve the best available version. + // This is typically the latest version, though during version transitions, + // notably when serving on a fleet of instances, may be either the previous or + // new version. + // + // Types that are valid to be assigned to VersionChoice: + // *ModelSpec_Version + // *ModelSpec_VersionLabel + VersionChoice isModelSpec_VersionChoice `protobuf_oneof:"version_choice"` + // A named signature to evaluate. If unspecified, the default signature will + // be used. 
+ SignatureName string `protobuf:"bytes,3,opt,name=signature_name,json=signatureName,proto3" json:"signature_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ModelSpec) Reset() { *m = ModelSpec{} } +func (m *ModelSpec) String() string { return proto.CompactTextString(m) } +func (*ModelSpec) ProtoMessage() {} +func (*ModelSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_371147c41a4c13ea, []int{0} +} + +func (m *ModelSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ModelSpec.Unmarshal(m, b) +} +func (m *ModelSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ModelSpec.Marshal(b, m, deterministic) +} +func (m *ModelSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModelSpec.Merge(m, src) +} +func (m *ModelSpec) XXX_Size() int { + return xxx_messageInfo_ModelSpec.Size(m) +} +func (m *ModelSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ModelSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ModelSpec proto.InternalMessageInfo + +func (m *ModelSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type isModelSpec_VersionChoice interface { + isModelSpec_VersionChoice() +} + +type ModelSpec_Version struct { + Version *wrappers.Int64Value `protobuf:"bytes,2,opt,name=version,proto3,oneof"` +} + +type ModelSpec_VersionLabel struct { + VersionLabel string `protobuf:"bytes,4,opt,name=version_label,json=versionLabel,proto3,oneof"` +} + +func (*ModelSpec_Version) isModelSpec_VersionChoice() {} + +func (*ModelSpec_VersionLabel) isModelSpec_VersionChoice() {} + +func (m *ModelSpec) GetVersionChoice() isModelSpec_VersionChoice { + if m != nil { + return m.VersionChoice + } + return nil +} + +func (m *ModelSpec) GetVersion() *wrappers.Int64Value { + if x, ok := m.GetVersionChoice().(*ModelSpec_Version); ok { + return x.Version + } + return nil +} + +func (m *ModelSpec) GetVersionLabel() string { + 
if x, ok := m.GetVersionChoice().(*ModelSpec_VersionLabel); ok { + return x.VersionLabel + } + return "" +} + +func (m *ModelSpec) GetSignatureName() string { + if m != nil { + return m.SignatureName + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ModelSpec) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ModelSpec_Version)(nil), + (*ModelSpec_VersionLabel)(nil), + } +} + +func init() { + proto.RegisterType((*ModelSpec)(nil), "tensorflow.serving.ModelSpec") +} + +func init() { + proto.RegisterFile("tensorflow_serving/apis/model.proto", fileDescriptor_371147c41a4c13ea) +} + +var fileDescriptor_371147c41a4c13ea = []byte{ + // 233 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x8e, 0xc1, 0x4b, 0xc3, 0x30, + 0x14, 0xc6, 0x17, 0x37, 0x94, 0x45, 0x37, 0x24, 0xa7, 0xa2, 0x20, 0x43, 0x19, 0xec, 0x94, 0x80, + 0x8a, 0xde, 0x77, 0x9a, 0xa0, 0x1e, 0x2a, 0x78, 0x2d, 0x69, 0x7d, 0x8b, 0x81, 0x34, 0x2f, 0x24, + 0xe9, 0xfa, 0xaf, 0xf9, 0xa7, 0x79, 0x94, 0xa6, 0xad, 0xbb, 0x7d, 0x7c, 0xfc, 0xde, 0xef, 0x7b, + 0xf4, 0x2e, 0x82, 0x0d, 0xe8, 0xf7, 0x06, 0xdb, 0x22, 0x80, 0x3f, 0x68, 0xab, 0x84, 0x74, 0x3a, + 0x88, 0x1a, 0xbf, 0xc0, 0x70, 0xe7, 0x31, 0x22, 0x63, 0x47, 0x88, 0x0f, 0xd0, 0xd5, 0x8d, 0x42, + 0x54, 0x06, 0x44, 0x22, 0xca, 0x66, 0x2f, 0x5a, 0x2f, 0x9d, 0x03, 0x1f, 0xfa, 0x9b, 0xdb, 0x1f, + 0x42, 0xe7, 0x6f, 0x9d, 0xe3, 0xc3, 0x41, 0xc5, 0x18, 0x9d, 0x59, 0x59, 0x43, 0x46, 0x56, 0x64, + 0x33, 0xcf, 0x53, 0x66, 0xcf, 0xf4, 0xec, 0x00, 0x3e, 0x68, 0xb4, 0xd9, 0xc9, 0x8a, 0x6c, 0xce, + 0xef, 0xaf, 0x79, 0xef, 0xe4, 0xa3, 0x93, 0xbf, 0xd8, 0xf8, 0xf4, 0xf8, 0x29, 0x4d, 0x03, 0xbb, + 0x49, 0x3e, 0xd2, 0x6c, 0x4d, 0x17, 0x43, 0x2c, 0x8c, 0x2c, 0xc1, 0x64, 0xb3, 0xce, 0xba, 0x9b, + 0xe4, 0x17, 0x43, 0xfd, 0xda, 0xb5, 0x6c, 0x4d, 0x97, 0x41, 0x2b, 0x2b, 0x63, 0xe3, 0xa1, 0x48, + 0xeb, 0xd3, 0xb4, 0xbe, 0xf8, 0x6f, 0xdf, 0x65, 0x0d, 0xdb, 0x4b, 0xba, 0x1c, 
0x6d, 0xd5, 0x37, + 0xea, 0x0a, 0xb6, 0xd3, 0x5f, 0x42, 0xca, 0xd3, 0xf4, 0xc4, 0xc3, 0x5f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xd5, 0x26, 0x44, 0xd1, 0x21, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/model_management.pb.go b/executor/proto/tensorflow/serving/model_management.pb.go new file mode 100644 index 0000000000..8f3e14184c --- /dev/null +++ b/executor/proto/tensorflow/serving/model_management.pb.go @@ -0,0 +1,125 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/apis/model_management.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ReloadConfigRequest struct { + Config *ModelServerConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReloadConfigRequest) Reset() { *m = ReloadConfigRequest{} } +func (m *ReloadConfigRequest) String() string { return proto.CompactTextString(m) } +func (*ReloadConfigRequest) ProtoMessage() {} +func (*ReloadConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_abcd87c5decbd4dd, []int{0} +} + +func (m *ReloadConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReloadConfigRequest.Unmarshal(m, b) +} +func (m *ReloadConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReloadConfigRequest.Marshal(b, m, deterministic) +} +func (m *ReloadConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReloadConfigRequest.Merge(m, src) +} +func (m *ReloadConfigRequest) XXX_Size() int { + return xxx_messageInfo_ReloadConfigRequest.Size(m) +} +func (m *ReloadConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReloadConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReloadConfigRequest proto.InternalMessageInfo + +func (m *ReloadConfigRequest) GetConfig() *ModelServerConfig { + if m != nil { + return m.Config + } + return nil +} + +type ReloadConfigResponse struct { + Status *StatusProto `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReloadConfigResponse) Reset() { *m = ReloadConfigResponse{} } +func (m *ReloadConfigResponse) String() string { return proto.CompactTextString(m) } +func (*ReloadConfigResponse) ProtoMessage() {} +func (*ReloadConfigResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_abcd87c5decbd4dd, []int{1} +} + +func (m *ReloadConfigResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReloadConfigResponse.Unmarshal(m, b) +} +func (m *ReloadConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReloadConfigResponse.Marshal(b, m, deterministic) +} +func (m *ReloadConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReloadConfigResponse.Merge(m, src) +} +func (m *ReloadConfigResponse) XXX_Size() int { + return xxx_messageInfo_ReloadConfigResponse.Size(m) +} +func (m *ReloadConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReloadConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReloadConfigResponse proto.InternalMessageInfo + +func (m *ReloadConfigResponse) GetStatus() *StatusProto { + if m != nil { + return m.Status + } + return nil +} + +func init() { + proto.RegisterType((*ReloadConfigRequest)(nil), "tensorflow.serving.ReloadConfigRequest") + proto.RegisterType((*ReloadConfigResponse)(nil), "tensorflow.serving.ReloadConfigResponse") +} + +func init() { + proto.RegisterFile("tensorflow_serving/apis/model_management.proto", fileDescriptor_abcd87c5decbd4dd) +} + +var fileDescriptor_abcd87c5decbd4dd = []byte{ + // 208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8e, 0x31, 0x6b, 0xc3, 0x30, + 0x14, 0x84, 0x11, 0x05, 0x0f, 0xea, 0xa6, 0x76, 0x28, 0x5e, 0x5a, 0x4c, 0x0b, 0x9d, 0x64, 0xa8, + 0x87, 0x4e, 0x5d, 0xda, 0xb9, 0xb4, 0xd8, 0xd9, 0x8d, 0x12, 0x3f, 0x1b, 0x81, 0xac, 0xe7, 0xe8, + 0xc9, 0xc9, 0x5f, 0xcf, 0x18, 0x2c, 0x09, 0x42, 0x88, 0xd7, 0xe3, 0xbb, 0xef, 0x8e, 0x4b, 0x0f, + 0x96, 0xd0, 0xf5, 0x06, 0x8f, 0x2d, 0x81, 0x3b, 0x68, 0x3b, 0x94, 0x6a, 0xd2, 0x54, 0x8e, 0xd8, + 0x81, 0x69, 0x47, 0x65, 0xd5, 0x00, 0x23, 0x58, 0x2f, 0x27, 0x87, 0x1e, 0x85, 0xb8, 0xf0, 0x32, + 0xf1, 0x79, 0xb5, 0xe2, 0xd8, 0xa1, 0xed, 0xf5, 0x90, 0x2c, 0x4b, 0x08, 0xae, 0x8d, 0x59, 0x14, + 0xe5, 0xaf, 
0x2b, 0xa5, 0xd9, 0x6b, 0x53, 0x92, 0x57, 0x7e, 0xa6, 0x48, 0x15, 0x1b, 0xfe, 0x50, + 0x83, 0x41, 0xd5, 0xfd, 0x84, 0x6e, 0x0d, 0xfb, 0x19, 0xc8, 0x8b, 0x2f, 0x9e, 0x45, 0xd9, 0x13, + 0x7b, 0x61, 0xef, 0xf7, 0x1f, 0x6f, 0xf2, 0xf6, 0x96, 0xfc, 0x5d, 0xb6, 0x9b, 0x30, 0x9d, 0xda, + 0xa9, 0x54, 0xfc, 0xf1, 0xc7, 0x6b, 0x2b, 0x4d, 0x68, 0x09, 0xc4, 0x27, 0xcf, 0xe2, 0x7a, 0xd2, + 0x3e, 0xaf, 0x69, 0x9b, 0x40, 0xfc, 0x2f, 0xf7, 0xea, 0x84, 0x7f, 0xdf, 0x9d, 0x18, 0xdb, 0x66, + 0xe1, 0x72, 0x75, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x23, 0xf4, 0x92, 0xdb, 0x53, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/model_server_config.pb.go b/executor/proto/tensorflow/serving/model_server_config.pb.go new file mode 100644 index 0000000000..82df724f3f --- /dev/null +++ b/executor/proto/tensorflow/serving/model_server_config.pb.go @@ -0,0 +1,351 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/config/model_server_config.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The type of model. +// TODO(b/31336131): DEPRECATED. +type ModelType int32 + +const ( + ModelType_MODEL_TYPE_UNSPECIFIED ModelType = 0 // Deprecated: Do not use. + ModelType_TENSORFLOW ModelType = 1 // Deprecated: Do not use. + ModelType_OTHER ModelType = 2 // Deprecated: Do not use. 
+) + +var ModelType_name = map[int32]string{ + 0: "MODEL_TYPE_UNSPECIFIED", + 1: "TENSORFLOW", + 2: "OTHER", +} + +var ModelType_value = map[string]int32{ + "MODEL_TYPE_UNSPECIFIED": 0, + "TENSORFLOW": 1, + "OTHER": 2, +} + +func (x ModelType) String() string { + return proto.EnumName(ModelType_name, int32(x)) +} + +func (ModelType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a1532d213e503811, []int{0} +} + +// Common configuration for loading a model being served. +type ModelConfig struct { + // Name of the model. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Base path to the model, excluding the version directory. + // E.g> for a model at /foo/bar/my_model/123, where 123 is the version, the + // base path is /foo/bar/my_model. + // + // (This can be changed once a model is in serving, *if* the underlying data + // remains the same. Otherwise there are no guarantees about whether the old + // or new data will be used for model versions currently loaded.) + BasePath string `protobuf:"bytes,2,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + // Type of model. + // TODO(b/31336131): DEPRECATED. Please use 'model_platform' instead. + ModelType ModelType `protobuf:"varint,3,opt,name=model_type,json=modelType,proto3,enum=tensorflow.serving.ModelType" json:"model_type,omitempty"` // Deprecated: Do not use. + // Type of model (e.g. "tensorflow"). + // + // (This cannot be changed once a model is in serving.) + ModelPlatform string `protobuf:"bytes,4,opt,name=model_platform,json=modelPlatform,proto3" json:"model_platform,omitempty"` + // Version policy for the model indicating which version(s) of the model to + // load and make available for serving simultaneously. + // The default option is to serve only the latest version of the model. + // + // (This can be changed once a model is in serving.) 
+ ModelVersionPolicy *FileSystemStoragePathSourceConfig_ServableVersionPolicy `protobuf:"bytes,7,opt,name=model_version_policy,json=modelVersionPolicy,proto3" json:"model_version_policy,omitempty"` + // String labels to associate with versions of the model, allowing inference + // queries to refer to versions by label instead of number. Multiple labels + // can map to the same version, but not vice-versa. + // + // An envisioned use-case for these labels is canarying tentative versions. + // For example, one can assign labels "stable" and "canary" to two specific + // versions. Perhaps initially "stable" is assigned to version 0 and "canary" + // to version 1. Once version 1 passes canary, one can shift the "stable" + // label to refer to version 1 (at that point both labels map to the same + // version -- version 1 -- which is fine). Later once version 2 is ready to + // canary one can move the "canary" label to version 2. And so on. + VersionLabels map[string]int64 `protobuf:"bytes,8,rep,name=version_labels,json=versionLabels,proto3" json:"version_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + // Configures logging requests and responses, to the model. + // + // (This can be changed once a model is in serving.) 
+ LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ModelConfig) Reset() { *m = ModelConfig{} } +func (m *ModelConfig) String() string { return proto.CompactTextString(m) } +func (*ModelConfig) ProtoMessage() {} +func (*ModelConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_a1532d213e503811, []int{0} +} + +func (m *ModelConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ModelConfig.Unmarshal(m, b) +} +func (m *ModelConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ModelConfig.Marshal(b, m, deterministic) +} +func (m *ModelConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModelConfig.Merge(m, src) +} +func (m *ModelConfig) XXX_Size() int { + return xxx_messageInfo_ModelConfig.Size(m) +} +func (m *ModelConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ModelConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ModelConfig proto.InternalMessageInfo + +func (m *ModelConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ModelConfig) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +// Deprecated: Do not use. 
+func (m *ModelConfig) GetModelType() ModelType { + if m != nil { + return m.ModelType + } + return ModelType_MODEL_TYPE_UNSPECIFIED +} + +func (m *ModelConfig) GetModelPlatform() string { + if m != nil { + return m.ModelPlatform + } + return "" +} + +func (m *ModelConfig) GetModelVersionPolicy() *FileSystemStoragePathSourceConfig_ServableVersionPolicy { + if m != nil { + return m.ModelVersionPolicy + } + return nil +} + +func (m *ModelConfig) GetVersionLabels() map[string]int64 { + if m != nil { + return m.VersionLabels + } + return nil +} + +func (m *ModelConfig) GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// Static list of models to be loaded for serving. +type ModelConfigList struct { + Config []*ModelConfig `protobuf:"bytes,1,rep,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ModelConfigList) Reset() { *m = ModelConfigList{} } +func (m *ModelConfigList) String() string { return proto.CompactTextString(m) } +func (*ModelConfigList) ProtoMessage() {} +func (*ModelConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_a1532d213e503811, []int{1} +} + +func (m *ModelConfigList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ModelConfigList.Unmarshal(m, b) +} +func (m *ModelConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ModelConfigList.Marshal(b, m, deterministic) +} +func (m *ModelConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModelConfigList.Merge(m, src) +} +func (m *ModelConfigList) XXX_Size() int { + return xxx_messageInfo_ModelConfigList.Size(m) +} +func (m *ModelConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_ModelConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_ModelConfigList proto.InternalMessageInfo + +func (m *ModelConfigList) GetConfig() []*ModelConfig { + if m != nil { + return 
m.Config + } + return nil +} + +// ModelServer config. +type ModelServerConfig struct { + // ModelServer takes either a static file-based model config list or an Any + // proto representing custom model config that is fetched dynamically at + // runtime (through network RPC, custom service, etc.). + // + // Types that are valid to be assigned to Config: + // *ModelServerConfig_ModelConfigList + // *ModelServerConfig_CustomModelConfig + Config isModelServerConfig_Config `protobuf_oneof:"config"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ModelServerConfig) Reset() { *m = ModelServerConfig{} } +func (m *ModelServerConfig) String() string { return proto.CompactTextString(m) } +func (*ModelServerConfig) ProtoMessage() {} +func (*ModelServerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_a1532d213e503811, []int{2} +} + +func (m *ModelServerConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ModelServerConfig.Unmarshal(m, b) +} +func (m *ModelServerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ModelServerConfig.Marshal(b, m, deterministic) +} +func (m *ModelServerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModelServerConfig.Merge(m, src) +} +func (m *ModelServerConfig) XXX_Size() int { + return xxx_messageInfo_ModelServerConfig.Size(m) +} +func (m *ModelServerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ModelServerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ModelServerConfig proto.InternalMessageInfo + +type isModelServerConfig_Config interface { + isModelServerConfig_Config() +} + +type ModelServerConfig_ModelConfigList struct { + ModelConfigList *ModelConfigList `protobuf:"bytes,1,opt,name=model_config_list,json=modelConfigList,proto3,oneof"` +} + +type ModelServerConfig_CustomModelConfig struct { + CustomModelConfig *any.Any 
`protobuf:"bytes,2,opt,name=custom_model_config,json=customModelConfig,proto3,oneof"` +} + +func (*ModelServerConfig_ModelConfigList) isModelServerConfig_Config() {} + +func (*ModelServerConfig_CustomModelConfig) isModelServerConfig_Config() {} + +func (m *ModelServerConfig) GetConfig() isModelServerConfig_Config { + if m != nil { + return m.Config + } + return nil +} + +func (m *ModelServerConfig) GetModelConfigList() *ModelConfigList { + if x, ok := m.GetConfig().(*ModelServerConfig_ModelConfigList); ok { + return x.ModelConfigList + } + return nil +} + +func (m *ModelServerConfig) GetCustomModelConfig() *any.Any { + if x, ok := m.GetConfig().(*ModelServerConfig_CustomModelConfig); ok { + return x.CustomModelConfig + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ModelServerConfig) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ModelServerConfig_ModelConfigList)(nil), + (*ModelServerConfig_CustomModelConfig)(nil), + } +} + +func init() { + proto.RegisterEnum("tensorflow.serving.ModelType", ModelType_name, ModelType_value) + proto.RegisterType((*ModelConfig)(nil), "tensorflow.serving.ModelConfig") + proto.RegisterMapType((map[string]int64)(nil), "tensorflow.serving.ModelConfig.VersionLabelsEntry") + proto.RegisterType((*ModelConfigList)(nil), "tensorflow.serving.ModelConfigList") + proto.RegisterType((*ModelServerConfig)(nil), "tensorflow.serving.ModelServerConfig") +} + +func init() { + proto.RegisterFile("tensorflow_serving/config/model_server_config.proto", fileDescriptor_a1532d213e503811) +} + +var fileDescriptor_a1532d213e503811 = []byte{ + // 581 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0x5d, 0x6f, 0xd3, 0x30, + 0x14, 0x9d, 0xdb, 0xae, 0xb4, 0xb7, 0x6a, 0xd7, 0x99, 0x09, 0x85, 0x22, 0xa0, 0x0c, 0x21, 0x55, + 0x3c, 0x24, 0x52, 0xf6, 0x00, 0xe2, 0x69, 0x6c, 0x4b, 0xd5, 0x8d, 0x7e, 0x91, 0x14, 0xd0, 0x9e, + 0xa2, 0xb4, 0xb8, 
0x59, 0x84, 0x13, 0x47, 0xb1, 0x5b, 0x94, 0x07, 0x7e, 0x05, 0x7f, 0x86, 0x9f, + 0xc6, 0x23, 0x8a, 0x9d, 0x42, 0xca, 0x3a, 0xed, 0x2d, 0x3e, 0x39, 0xf7, 0x9c, 0x7b, 0xee, 0xb5, + 0xe1, 0x44, 0x90, 0x88, 0xb3, 0x64, 0x49, 0xd9, 0x77, 0x97, 0x93, 0x64, 0x1d, 0x44, 0xbe, 0xb1, + 0x60, 0xd1, 0x32, 0xf0, 0x8d, 0x90, 0x7d, 0x25, 0x54, 0x82, 0x24, 0x71, 0x15, 0xa6, 0xc7, 0x09, + 0x13, 0x0c, 0xe3, 0x7f, 0x45, 0x7a, 0x5e, 0xd4, 0x79, 0xec, 0x33, 0xe6, 0x53, 0x62, 0x48, 0xc6, + 0x7c, 0xb5, 0x34, 0xbc, 0x28, 0x55, 0xf4, 0x8e, 0x7e, 0xb7, 0x07, 0x65, 0xbe, 0x1f, 0x44, 0xfe, + 0x96, 0x7c, 0x67, 0xb4, 0x83, 0xcf, 0xd9, 0x2a, 0x59, 0x10, 0x6e, 0x70, 0xc1, 0x12, 0xcf, 0x27, + 0x6e, 0xec, 0x89, 0x1b, 0x63, 0x19, 0x50, 0xe2, 0xf2, 0x94, 0x0b, 0x12, 0xba, 0xc5, 0x1f, 0xae, + 0x62, 0x2b, 0xb9, 0xe3, 0x9f, 0x15, 0x68, 0x8c, 0xb2, 0x2c, 0xe7, 0xd2, 0x04, 0x63, 0xa8, 0x44, + 0x5e, 0x48, 0x34, 0xd4, 0x45, 0xbd, 0xba, 0x2d, 0xbf, 0xf1, 0x13, 0xa8, 0xcf, 0x3d, 0xae, 0xaa, + 0xb5, 0x92, 0xfc, 0x51, 0xcb, 0x80, 0xa9, 0x27, 0x6e, 0xf0, 0x29, 0x80, 0x9a, 0x85, 0x48, 0x63, + 0xa2, 0x95, 0xbb, 0xa8, 0xd7, 0x32, 0x9f, 0xea, 0xb7, 0x67, 0xa0, 0x4b, 0x97, 0x59, 0x1a, 0x93, + 0xb3, 0x92, 0x86, 0xec, 0x7a, 0xb8, 0x39, 0xe2, 0x57, 0xd0, 0x52, 0x0a, 0x31, 0xf5, 0xc4, 0x92, + 0x25, 0xa1, 0x56, 0x91, 0x1e, 0x4d, 0x89, 0x4e, 0x73, 0x10, 0xff, 0x80, 0x23, 0x45, 0x5b, 0x93, + 0x84, 0x07, 0x2c, 0x72, 0x63, 0x46, 0x83, 0x45, 0xaa, 0x3d, 0xe8, 0xa2, 0x5e, 0xc3, 0xfc, 0xb0, + 0xcb, 0xb2, 0x1f, 0x50, 0xe2, 0xc8, 0x09, 0x38, 0x6a, 0x00, 0x59, 0xc7, 0x8e, 0x8c, 0xaf, 0xe2, + 0xea, 0x0e, 0x49, 0xd6, 0xde, 0x9c, 0x92, 0xcf, 0x4a, 0x73, 0x2a, 0x25, 0x6d, 0x2c, 0x8d, 0xb6, + 0x30, 0x7c, 0x0d, 0xad, 0x8d, 0x31, 0xf5, 0xe6, 0x84, 0x72, 0xad, 0xd6, 0x2d, 0xf7, 0x1a, 0xa6, + 0x79, 0x67, 0xd6, 0xdc, 0x22, 0x97, 0x19, 0xca, 0x22, 0x2b, 0x12, 0x49, 0x6a, 0x37, 0xd7, 0x45, + 0x0c, 0x0f, 0xa0, 0xb5, 0xbd, 0x6a, 0xad, 0x2a, 0x33, 0xbd, 0xd8, 0x25, 0x3d, 0x54, 0x4c, 0x25, + 0x6e, 0x37, 0x69, 0xf1, 0xd8, 0x39, 0x05, 0x7c, 0xdb, 
0x0e, 0xb7, 0xa1, 0xfc, 0x8d, 0xa4, 0xf9, + 0x4a, 0xb3, 0x4f, 0x7c, 0x04, 0xfb, 0x6b, 0x8f, 0xae, 0x88, 0xdc, 0x66, 0xd9, 0x56, 0x87, 0x77, + 0xa5, 0xb7, 0xe8, 0xaa, 0x52, 0xdb, 0x6f, 0x57, 0x8f, 0xaf, 0xe0, 0xa0, 0x10, 0x61, 0x18, 0x70, + 0x81, 0xdf, 0x40, 0x35, 0x6f, 0x0e, 0xc9, 0xdc, 0xcf, 0xef, 0xc9, 0x6d, 0xe7, 0xf4, 0xe3, 0x5f, + 0x08, 0x0e, 0x25, 0xee, 0xc8, 0xc7, 0x92, 0xdf, 0xb3, 0x8f, 0x70, 0xa8, 0xb6, 0xa9, 0x58, 0x2e, + 0x0d, 0xb8, 0x90, 0x1d, 0x36, 0xcc, 0x97, 0xf7, 0x28, 0x67, 0xed, 0x0c, 0xf6, 0xec, 0x83, 0xf0, + 0xbf, 0x0e, 0xfb, 0xf0, 0x70, 0xb1, 0xe2, 0x82, 0x85, 0x6e, 0x51, 0x59, 0x46, 0x6c, 0x98, 0x47, + 0xba, 0x7a, 0x82, 0xfa, 0xe6, 0x09, 0xea, 0xef, 0xa3, 0x74, 0xb0, 0x67, 0x1f, 0xaa, 0x92, 0x82, + 0xfc, 0x59, 0x6d, 0x93, 0xf4, 0xf5, 0x18, 0xea, 0x7f, 0x6f, 0x2d, 0x7e, 0x06, 0x8f, 0x46, 0x93, + 0x0b, 0x6b, 0xe8, 0xce, 0xae, 0xa7, 0x96, 0xfb, 0x69, 0xec, 0x4c, 0xad, 0xf3, 0xcb, 0xfe, 0xa5, + 0x75, 0xd1, 0xde, 0xeb, 0x94, 0x6a, 0x08, 0x63, 0x80, 0x99, 0x35, 0x76, 0x26, 0x76, 0x7f, 0x38, + 0xf9, 0xd2, 0x46, 0x12, 0x6b, 0xc2, 0xfe, 0x64, 0x36, 0xb0, 0xec, 0x76, 0x29, 0x3b, 0x9e, 0x95, + 0x7f, 0x23, 0x34, 0xaf, 0xca, 0x0e, 0x4e, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x26, 0x49, 0xef, + 0x06, 0x5d, 0x04, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/model_service.pb.go b/executor/proto/tensorflow/serving/model_service.pb.go new file mode 100644 index 0000000000..5fcfe8c5fa --- /dev/null +++ b/executor/proto/tensorflow/serving/model_service.pb.go @@ -0,0 +1,176 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/apis/model_service.proto + +package serving + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("tensorflow_serving/apis/model_service.proto", fileDescriptor_59da5a1442bba4ef) +} + +var fileDescriptor_59da5a1442bba4ef = []byte{ + // 198 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xcf, 0xbd, 0x0a, 0xc2, 0x30, + 0x10, 0xc0, 0x71, 0x8a, 0xe0, 0x10, 0xc4, 0x21, 0x9b, 0x7d, 0x01, 0xbf, 0x20, 0x05, 0x7d, 0x03, + 0x1d, 0x74, 0x71, 0xa9, 0x0f, 0x50, 0xa2, 0xbd, 0x86, 0x42, 0x7a, 0x57, 0x9b, 0xab, 0x3e, 0xb2, + 0xaf, 0xe0, 0x28, 0x36, 0x05, 0x11, 0x5b, 0x74, 0xbd, 0xfc, 0xf8, 0xe7, 0x4e, 0x2c, 0x19, 0xd0, + 0x51, 0x95, 0x59, 0xba, 0x25, 0x0e, 0xaa, 0x6b, 0x8e, 0x26, 0xd2, 0x65, 0xee, 0xa2, 0x82, 0x52, + 0xb0, 0x7e, 0x74, 0x06, 0x55, 0x56, 0xc4, 0x24, 0xe5, 0x1b, 0xab, 0x16, 0x87, 0xaa, 0x2f, 0x60, + 0x80, 0x93, 0x36, 0xc2, 0x9a, 0x6b, 0xe7, 0x1b, 0xfd, 0xde, 0xdb, 0x42, 0xa3, 0x36, 0x50, 0x00, + 0xb2, 0xf7, 0xab, 0x7b, 0x20, 0x46, 0x87, 0xd7, 0xd3, 0xd1, 0xaf, 0x22, 0x8d, 0x18, 0xef, 0x80, + 0xfd, 0xa8, 0x09, 0xcb, 0xb9, 0xfa, 0xde, 0x4b, 0x7d, 0x9a, 0x18, 0x2e, 0x35, 0x38, 0x0e, 0x17, + 0xff, 0x50, 0x57, 0x12, 0x3a, 0x90, 0x28, 0x26, 0x7b, 0x8d, 0xa9, 0x85, 0x18, 0x2c, 0xe9, 0x74, + 0x4b, 0x98, 0xe5, 0xa6, 0x0d, 0xc9, 0x69, 0x57, 0xa8, 0x03, 0x86, 0xb3, 0xdf, 0xd0, 0xff, 0xb7, + 0x19, 0x3c, 0x82, 0xe0, 0x34, 0x6c, 0xae, 0x5e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe8, 0xc5, + 0x56, 0x18, 0x98, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ModelServiceClient is the client API for ModelService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ModelServiceClient interface { + // Gets status of model. If the ModelSpec in the request does not specify + // version, information about all versions of the model will be returned. If + // the ModelSpec in the request does specify a version, the status of only + // that version will be returned. + GetModelStatus(ctx context.Context, in *GetModelStatusRequest, opts ...grpc.CallOption) (*GetModelStatusResponse, error) + // Reloads the set of served models. The new config supersedes the old one, + // so if a model is omitted from the new config it will be unloaded and no + // longer served. + HandleReloadConfigRequest(ctx context.Context, in *ReloadConfigRequest, opts ...grpc.CallOption) (*ReloadConfigResponse, error) +} + +type modelServiceClient struct { + cc *grpc.ClientConn +} + +func NewModelServiceClient(cc *grpc.ClientConn) ModelServiceClient { + return &modelServiceClient{cc} +} + +func (c *modelServiceClient) GetModelStatus(ctx context.Context, in *GetModelStatusRequest, opts ...grpc.CallOption) (*GetModelStatusResponse, error) { + out := new(GetModelStatusResponse) + err := c.cc.Invoke(ctx, "/tensorflow.serving.ModelService/GetModelStatus", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) HandleReloadConfigRequest(ctx context.Context, in *ReloadConfigRequest, opts ...grpc.CallOption) (*ReloadConfigResponse, error) { + out := new(ReloadConfigResponse) + err := c.cc.Invoke(ctx, "/tensorflow.serving.ModelService/HandleReloadConfigRequest", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ModelServiceServer is the server API for ModelService service. +type ModelServiceServer interface { + // Gets status of model. If the ModelSpec in the request does not specify + // version, information about all versions of the model will be returned. If + // the ModelSpec in the request does specify a version, the status of only + // that version will be returned. + GetModelStatus(context.Context, *GetModelStatusRequest) (*GetModelStatusResponse, error) + // Reloads the set of served models. The new config supersedes the old one, + // so if a model is omitted from the new config it will be unloaded and no + // longer served. + HandleReloadConfigRequest(context.Context, *ReloadConfigRequest) (*ReloadConfigResponse, error) +} + +// UnimplementedModelServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedModelServiceServer struct { +} + +func (*UnimplementedModelServiceServer) GetModelStatus(ctx context.Context, req *GetModelStatusRequest) (*GetModelStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetModelStatus not implemented") +} +func (*UnimplementedModelServiceServer) HandleReloadConfigRequest(ctx context.Context, req *ReloadConfigRequest) (*ReloadConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method HandleReloadConfigRequest not implemented") +} + +func RegisterModelServiceServer(s *grpc.Server, srv ModelServiceServer) { + s.RegisterService(&_ModelService_serviceDesc, srv) +} + +func _ModelService_GetModelStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetModelStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).GetModelStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tensorflow.serving.ModelService/GetModelStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).GetModelStatus(ctx, req.(*GetModelStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_HandleReloadConfigRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReloadConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).HandleReloadConfigRequest(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tensorflow.serving.ModelService/HandleReloadConfigRequest", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ModelServiceServer).HandleReloadConfigRequest(ctx, req.(*ReloadConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ModelService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tensorflow.serving.ModelService", + HandlerType: (*ModelServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetModelStatus", + Handler: _ModelService_GetModelStatus_Handler, + }, + { + MethodName: "HandleReloadConfigRequest", + Handler: _ModelService_HandleReloadConfigRequest_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tensorflow_serving/apis/model_service.proto", +} diff --git a/executor/proto/tensorflow/serving/monitoring_config.pb.go b/executor/proto/tensorflow/serving/monitoring_config.pb.go new file mode 100644 index 0000000000..3f7945bc50 --- /dev/null +++ b/executor/proto/tensorflow/serving/monitoring_config.pb.go @@ -0,0 +1,136 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/config/monitoring_config.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Configuration for Prometheus monitoring. +type PrometheusConfig struct { + // Whether to expose Prometheus metrics. + Enable bool `protobuf:"varint,1,opt,name=enable,proto3" json:"enable,omitempty"` + // The endpoint to expose Prometheus metrics. + // If not specified, PrometheusExporter::kPrometheusPath value is used. 
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrometheusConfig) Reset() { *m = PrometheusConfig{} } +func (m *PrometheusConfig) String() string { return proto.CompactTextString(m) } +func (*PrometheusConfig) ProtoMessage() {} +func (*PrometheusConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_36ed4c52c00502cb, []int{0} +} + +func (m *PrometheusConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrometheusConfig.Unmarshal(m, b) +} +func (m *PrometheusConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrometheusConfig.Marshal(b, m, deterministic) +} +func (m *PrometheusConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrometheusConfig.Merge(m, src) +} +func (m *PrometheusConfig) XXX_Size() int { + return xxx_messageInfo_PrometheusConfig.Size(m) +} +func (m *PrometheusConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PrometheusConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PrometheusConfig proto.InternalMessageInfo + +func (m *PrometheusConfig) GetEnable() bool { + if m != nil { + return m.Enable + } + return false +} + +func (m *PrometheusConfig) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +// Configuration for monitoring. 
+type MonitoringConfig struct { + PrometheusConfig *PrometheusConfig `protobuf:"bytes,1,opt,name=prometheus_config,json=prometheusConfig,proto3" json:"prometheus_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MonitoringConfig) Reset() { *m = MonitoringConfig{} } +func (m *MonitoringConfig) String() string { return proto.CompactTextString(m) } +func (*MonitoringConfig) ProtoMessage() {} +func (*MonitoringConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_36ed4c52c00502cb, []int{1} +} + +func (m *MonitoringConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MonitoringConfig.Unmarshal(m, b) +} +func (m *MonitoringConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MonitoringConfig.Marshal(b, m, deterministic) +} +func (m *MonitoringConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonitoringConfig.Merge(m, src) +} +func (m *MonitoringConfig) XXX_Size() int { + return xxx_messageInfo_MonitoringConfig.Size(m) +} +func (m *MonitoringConfig) XXX_DiscardUnknown() { + xxx_messageInfo_MonitoringConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_MonitoringConfig proto.InternalMessageInfo + +func (m *MonitoringConfig) GetPrometheusConfig() *PrometheusConfig { + if m != nil { + return m.PrometheusConfig + } + return nil +} + +func init() { + proto.RegisterType((*PrometheusConfig)(nil), "tensorflow.serving.PrometheusConfig") + proto.RegisterType((*MonitoringConfig)(nil), "tensorflow.serving.MonitoringConfig") +} + +func init() { + proto.RegisterFile("tensorflow_serving/config/monitoring_config.proto", fileDescriptor_36ed4c52c00502cb) +} + +var fileDescriptor_36ed4c52c00502cb = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x2c, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0x8f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0xcc, 
0x4b, 0xd7, 0x4f, + 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xd7, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, + 0x8f, 0x87, 0x88, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x21, 0xb4, 0xe8, 0x41, 0xb5, + 0x28, 0xd9, 0x71, 0x09, 0x04, 0x14, 0xe5, 0xe7, 0xa6, 0x96, 0x64, 0xa4, 0x96, 0x16, 0x3b, 0x83, + 0x55, 0x0b, 0x89, 0x71, 0xb1, 0xa5, 0xe6, 0x25, 0x26, 0xe5, 0xa4, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x04, 0x41, 0x79, 0x42, 0x42, 0x5c, 0x2c, 0x05, 0x89, 0x25, 0x19, 0x12, 0x4c, 0x0a, 0x8c, + 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x52, 0x2a, 0x97, 0x80, 0x2f, 0xdc, 0x3a, 0xa8, 0xfe, 0x40, 0x2e, + 0xc1, 0x02, 0xb8, 0x99, 0x50, 0x27, 0x80, 0x8d, 0xe2, 0x36, 0x52, 0xd1, 0xc3, 0x74, 0x83, 0x1e, + 0xba, 0x03, 0x82, 0x04, 0x0a, 0xd0, 0x44, 0x9c, 0x98, 0x7f, 0x30, 0x32, 0x26, 0xb1, 0x81, 0xbd, + 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xc2, 0x1d, 0x3a, 0xb9, 0xfb, 0x00, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/platform_config.pb.go b/executor/proto/tensorflow/serving/platform_config.pb.go new file mode 100644 index 0000000000..862fdf74f5 --- /dev/null +++ b/executor/proto/tensorflow/serving/platform_config.pb.go @@ -0,0 +1,135 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/config/platform_config.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Configuration for a servable platform e.g. tensorflow or other ML systems. 
+type PlatformConfig struct { + // The config proto for a SourceAdapter in the StoragePathSourceAdapter + // registry. + SourceAdapterConfig *any.Any `protobuf:"bytes,1,opt,name=source_adapter_config,json=sourceAdapterConfig,proto3" json:"source_adapter_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlatformConfig) Reset() { *m = PlatformConfig{} } +func (m *PlatformConfig) String() string { return proto.CompactTextString(m) } +func (*PlatformConfig) ProtoMessage() {} +func (*PlatformConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3919c9a7a426ae, []int{0} +} + +func (m *PlatformConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlatformConfig.Unmarshal(m, b) +} +func (m *PlatformConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlatformConfig.Marshal(b, m, deterministic) +} +func (m *PlatformConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlatformConfig.Merge(m, src) +} +func (m *PlatformConfig) XXX_Size() int { + return xxx_messageInfo_PlatformConfig.Size(m) +} +func (m *PlatformConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PlatformConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PlatformConfig proto.InternalMessageInfo + +func (m *PlatformConfig) GetSourceAdapterConfig() *any.Any { + if m != nil { + return m.SourceAdapterConfig + } + return nil +} + +type PlatformConfigMap struct { + // A map from a platform name to a platform config. The platform name is used + // in ModelConfig.model_platform. 
+ PlatformConfigs map[string]*PlatformConfig `protobuf:"bytes,1,rep,name=platform_configs,json=platformConfigs,proto3" json:"platform_configs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlatformConfigMap) Reset() { *m = PlatformConfigMap{} } +func (m *PlatformConfigMap) String() string { return proto.CompactTextString(m) } +func (*PlatformConfigMap) ProtoMessage() {} +func (*PlatformConfigMap) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3919c9a7a426ae, []int{1} +} + +func (m *PlatformConfigMap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlatformConfigMap.Unmarshal(m, b) +} +func (m *PlatformConfigMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlatformConfigMap.Marshal(b, m, deterministic) +} +func (m *PlatformConfigMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlatformConfigMap.Merge(m, src) +} +func (m *PlatformConfigMap) XXX_Size() int { + return xxx_messageInfo_PlatformConfigMap.Size(m) +} +func (m *PlatformConfigMap) XXX_DiscardUnknown() { + xxx_messageInfo_PlatformConfigMap.DiscardUnknown(m) +} + +var xxx_messageInfo_PlatformConfigMap proto.InternalMessageInfo + +func (m *PlatformConfigMap) GetPlatformConfigs() map[string]*PlatformConfig { + if m != nil { + return m.PlatformConfigs + } + return nil +} + +func init() { + proto.RegisterType((*PlatformConfig)(nil), "tensorflow.serving.PlatformConfig") + proto.RegisterType((*PlatformConfigMap)(nil), "tensorflow.serving.PlatformConfigMap") + proto.RegisterMapType((map[string]*PlatformConfig)(nil), "tensorflow.serving.PlatformConfigMap.PlatformConfigsEntry") +} + +func init() { + proto.RegisterFile("tensorflow_serving/config/platform_config.proto", fileDescriptor_7b3919c9a7a426ae) +} + +var fileDescriptor_7b3919c9a7a426ae = []byte{ + // 252 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0xc1, 0x4a, 0x03, 0x31, + 0x10, 0x86, 0x49, 0x17, 0x05, 0x53, 0xd0, 0x1a, 0x2b, 0xac, 0x3d, 0x95, 0x3d, 0xf5, 0x94, 0x40, + 0xbd, 0x94, 0xde, 0xaa, 0x08, 0x5e, 0x04, 0xd9, 0xa3, 0x97, 0x25, 0xad, 0x93, 0xa5, 0xb8, 0x66, + 0x42, 0x92, 0xad, 0xe4, 0x75, 0x7d, 0x0a, 0x8f, 0x62, 0xb2, 0x22, 0xed, 0x0a, 0xbd, 0x25, 0xc3, + 0xff, 0x7d, 0xfc, 0x33, 0x54, 0x78, 0xd0, 0x0e, 0xad, 0x6a, 0xf0, 0xa3, 0x72, 0x60, 0x77, 0x5b, + 0x5d, 0x8b, 0x0d, 0x6a, 0xb5, 0xad, 0x85, 0x69, 0xa4, 0x57, 0x68, 0xdf, 0xab, 0xf4, 0xe7, 0xc6, + 0xa2, 0x47, 0xc6, 0xfe, 0x00, 0xde, 0x01, 0x93, 0x9b, 0x1a, 0xb1, 0x6e, 0x40, 0xc4, 0xc4, 0xba, + 0x55, 0x42, 0xea, 0x90, 0xe2, 0xc5, 0x0b, 0x3d, 0x7f, 0xee, 0x3c, 0xf7, 0x51, 0xc3, 0x1e, 0xe9, + 0xb5, 0xc3, 0xd6, 0x6e, 0xa0, 0x92, 0xaf, 0xd2, 0x78, 0xb0, 0x9d, 0x3f, 0x27, 0x53, 0x32, 0x1b, + 0xce, 0xc7, 0x3c, 0xc9, 0xf8, 0xaf, 0x8c, 0xaf, 0x74, 0x28, 0xaf, 0x12, 0xb2, 0x4a, 0x44, 0x32, + 0x15, 0x9f, 0x84, 0x5e, 0xee, 0xcb, 0x9f, 0xa4, 0x61, 0x40, 0x47, 0x07, 0xcd, 0x5d, 0x4e, 0xa6, + 0xd9, 0x6c, 0x38, 0x5f, 0xf2, 0x7e, 0x77, 0xde, 0x13, 0x1c, 0x4c, 0xdc, 0x83, 0xf6, 0x36, 0x94, + 0x17, 0x66, 0x7f, 0x3a, 0x51, 0x74, 0xfc, 0x5f, 0x90, 0x8d, 0x68, 0xf6, 0x06, 0x21, 0x2e, 0x73, + 0x56, 0xfe, 0x3c, 0xd9, 0x82, 0x9e, 0xec, 0x64, 0xd3, 0x42, 0x3e, 0x88, 0x0b, 0x16, 0xc7, 0x5b, + 0x94, 0x09, 0x58, 0x0e, 0x16, 0xe4, 0x2e, 0xfb, 0x22, 0x64, 0x7d, 0x1a, 0x8f, 0x71, 0xfb, 0x1d, + 0x00, 0x00, 0xff, 0xff, 0x50, 0x69, 0x59, 0xfb, 0xae, 0x01, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/predict.pb.go b/executor/proto/tensorflow/serving/predict.pb.go new file mode 100644 index 0000000000..6f5d960f5b --- /dev/null +++ b/executor/proto/tensorflow/serving/predict.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow_serving/apis/predict.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + framework "github.com/tensorflow/tensorflow/tensorflow/go/core/framework" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// PredictRequest specifies which TensorFlow model to run, as well as +// how inputs are mapped to tensors and how outputs are filtered before +// returning to user. +type PredictRequest struct { + // Model Specification. If version is not specified, will use the latest + // (numerical) version. + ModelSpec *ModelSpec `protobuf:"bytes,1,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // Input tensors. + // Names of input tensor are alias names. The mapping from aliases to real + // input tensor names is stored in the SavedModel export as a prediction + // SignatureDef under the 'inputs' field. + Inputs map[string]*framework.TensorProto `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Output filter. + // Names specified are alias names. The mapping from aliases to real output + // tensor names is stored in the SavedModel export as a prediction + // SignatureDef under the 'outputs' field. + // Only tensors specified here will be run/fetched and returned, with the + // exception that when none is specified, all tensors specified in the + // named signature will be run/fetched and returned. 
+ OutputFilter []string `protobuf:"bytes,3,rep,name=output_filter,json=outputFilter,proto3" json:"output_filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PredictRequest) Reset() { *m = PredictRequest{} } +func (m *PredictRequest) String() string { return proto.CompactTextString(m) } +func (*PredictRequest) ProtoMessage() {} +func (*PredictRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_d0799bd5d5992a93, []int{0} +} + +func (m *PredictRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PredictRequest.Unmarshal(m, b) +} +func (m *PredictRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PredictRequest.Marshal(b, m, deterministic) +} +func (m *PredictRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PredictRequest.Merge(m, src) +} +func (m *PredictRequest) XXX_Size() int { + return xxx_messageInfo_PredictRequest.Size(m) +} +func (m *PredictRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PredictRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PredictRequest proto.InternalMessageInfo + +func (m *PredictRequest) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *PredictRequest) GetInputs() map[string]*framework.TensorProto { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *PredictRequest) GetOutputFilter() []string { + if m != nil { + return m.OutputFilter + } + return nil +} + +// Response for PredictRequest on successful run. +type PredictResponse struct { + // Effective Model Specification used to process PredictRequest. + ModelSpec *ModelSpec `protobuf:"bytes,2,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // Output tensors. 
+ Outputs map[string]*framework.TensorProto `protobuf:"bytes,1,rep,name=outputs,proto3" json:"outputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PredictResponse) Reset() { *m = PredictResponse{} } +func (m *PredictResponse) String() string { return proto.CompactTextString(m) } +func (*PredictResponse) ProtoMessage() {} +func (*PredictResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_d0799bd5d5992a93, []int{1} +} + +func (m *PredictResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PredictResponse.Unmarshal(m, b) +} +func (m *PredictResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PredictResponse.Marshal(b, m, deterministic) +} +func (m *PredictResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PredictResponse.Merge(m, src) +} +func (m *PredictResponse) XXX_Size() int { + return xxx_messageInfo_PredictResponse.Size(m) +} +func (m *PredictResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PredictResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PredictResponse proto.InternalMessageInfo + +func (m *PredictResponse) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *PredictResponse) GetOutputs() map[string]*framework.TensorProto { + if m != nil { + return m.Outputs + } + return nil +} + +func init() { + proto.RegisterType((*PredictRequest)(nil), "tensorflow.serving.PredictRequest") + proto.RegisterMapType((map[string]*framework.TensorProto)(nil), "tensorflow.serving.PredictRequest.InputsEntry") + proto.RegisterType((*PredictResponse)(nil), "tensorflow.serving.PredictResponse") + proto.RegisterMapType((map[string]*framework.TensorProto)(nil), "tensorflow.serving.PredictResponse.OutputsEntry") +} + +func init() { + 
proto.RegisterFile("tensorflow_serving/apis/predict.proto", fileDescriptor_d0799bd5d5992a93) +} + +var fileDescriptor_d0799bd5d5992a93 = []byte{ + // 325 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x52, 0x4f, 0x4b, 0xfb, 0x40, + 0x14, 0x64, 0x13, 0x7e, 0xfd, 0xd1, 0xd7, 0xfa, 0x87, 0xbd, 0x18, 0x02, 0x42, 0x69, 0x51, 0x7a, + 0x71, 0x23, 0xf5, 0x22, 0xe2, 0x49, 0xb0, 0xa0, 0x20, 0x96, 0xad, 0xf7, 0x52, 0xd3, 0x57, 0x09, + 0x4d, 0xb2, 0xeb, 0xee, 0xa6, 0xa5, 0x9f, 0xc2, 0xaf, 0xea, 0xd1, 0xa3, 0x64, 0xb7, 0xb5, 0xf1, + 0x2f, 0x08, 0xde, 0x1e, 0xf3, 0x66, 0x76, 0xe6, 0x0d, 0x0b, 0x07, 0x06, 0x73, 0x2d, 0xd4, 0x34, + 0x15, 0x8b, 0x91, 0x46, 0x35, 0x4f, 0xf2, 0x87, 0x68, 0x2c, 0x13, 0x1d, 0x49, 0x85, 0x93, 0x24, + 0x36, 0x4c, 0x2a, 0x61, 0x04, 0xa5, 0x1b, 0x1a, 0x5b, 0xd1, 0xc2, 0xc3, 0x0d, 0x16, 0xc5, 0x42, + 0x61, 0x34, 0x55, 0xe3, 0x0c, 0x17, 0x42, 0xcd, 0x22, 0xb7, 0x71, 0xda, 0xb0, 0xf3, 0x9d, 0x45, + 0x26, 0x26, 0x98, 0x3a, 0x52, 0xfb, 0xc9, 0x83, 0xed, 0x81, 0xb3, 0xe4, 0xf8, 0x58, 0xa0, 0x36, + 0xf4, 0x1c, 0xc0, 0x32, 0x46, 0x5a, 0x62, 0x1c, 0x90, 0x16, 0xe9, 0x36, 0x7a, 0xfb, 0xec, 0x73, + 0x10, 0x76, 0x53, 0xb2, 0x86, 0x12, 0x63, 0x5e, 0xcf, 0xd6, 0x23, 0xed, 0x43, 0x2d, 0xc9, 0x65, + 0x61, 0x74, 0xe0, 0xb5, 0xfc, 0x6e, 0xa3, 0xc7, 0xbe, 0x52, 0xbe, 0x77, 0x64, 0x57, 0x56, 0x70, + 0x99, 0x1b, 0xb5, 0xe4, 0x2b, 0x35, 0xed, 0xc0, 0x96, 0x28, 0x8c, 0x2c, 0xcc, 0x68, 0x9a, 0xa4, + 0x06, 0x55, 0xe0, 0xb7, 0xfc, 0x6e, 0x9d, 0x37, 0x1d, 0xd8, 0xb7, 0x58, 0xc8, 0xa1, 0x51, 0xd1, + 0xd2, 0x5d, 0xf0, 0x67, 0xb8, 0xb4, 0x91, 0xeb, 0xbc, 0x1c, 0xe9, 0x11, 0xfc, 0x9b, 0x8f, 0xd3, + 0x02, 0x03, 0xcf, 0x9e, 0xb1, 0x57, 0x0d, 0x73, 0x67, 0xc7, 0x41, 0x59, 0x03, 0x77, 0xac, 0x33, + 0xef, 0x94, 0xb4, 0x9f, 0x09, 0xec, 0xbc, 0xe5, 0xd3, 0x52, 0xe4, 0x1a, 0x3f, 0x54, 0xe2, 0xfd, + 0xb2, 0x92, 0x6b, 0xf8, 0xef, 0x52, 0xeb, 0x80, 0xd8, 0x4e, 0x8e, 0x7f, 0xec, 0xc4, 0x79, 0xb2, + 0x5b, 0x27, 0x71, 0xad, 0xac, 0x1f, 
0x08, 0x87, 0xd0, 0xac, 0x2e, 0xfe, 0xe4, 0xe4, 0x0b, 0xff, + 0x85, 0x90, 0xfb, 0x9a, 0xfd, 0x10, 0x27, 0xaf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x57, 0xf4, + 0x30, 0x9a, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/prediction_log.pb.go b/executor/proto/tensorflow/serving/prediction_log.pb.go new file mode 100644 index 0000000000..e524d84e67 --- /dev/null +++ b/executor/proto/tensorflow/serving/prediction_log.pb.go @@ -0,0 +1,438 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/apis/prediction_log.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ClassifyLog struct { + Request *ClassificationRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + Response *ClassificationResponse `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClassifyLog) Reset() { *m = ClassifyLog{} } +func (m *ClassifyLog) String() string { return proto.CompactTextString(m) } +func (*ClassifyLog) ProtoMessage() {} +func (*ClassifyLog) Descriptor() ([]byte, []int) { + return fileDescriptor_db62cb1da263d301, []int{0} +} + +func (m *ClassifyLog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClassifyLog.Unmarshal(m, b) +} +func (m *ClassifyLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClassifyLog.Marshal(b, m, deterministic) +} +func (m *ClassifyLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClassifyLog.Merge(m, src) +} +func (m *ClassifyLog) XXX_Size() int { + return xxx_messageInfo_ClassifyLog.Size(m) +} +func (m *ClassifyLog) XXX_DiscardUnknown() { + xxx_messageInfo_ClassifyLog.DiscardUnknown(m) +} + +var xxx_messageInfo_ClassifyLog proto.InternalMessageInfo + +func (m *ClassifyLog) GetRequest() *ClassificationRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *ClassifyLog) GetResponse() *ClassificationResponse { + if m != nil { + return m.Response + } + return nil +} + +type RegressLog struct { + Request *RegressionRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + Response *RegressionResponse `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegressLog) Reset() { *m = RegressLog{} } +func (m *RegressLog) String() string { 
return proto.CompactTextString(m) } +func (*RegressLog) ProtoMessage() {} +func (*RegressLog) Descriptor() ([]byte, []int) { + return fileDescriptor_db62cb1da263d301, []int{1} +} + +func (m *RegressLog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RegressLog.Unmarshal(m, b) +} +func (m *RegressLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RegressLog.Marshal(b, m, deterministic) +} +func (m *RegressLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegressLog.Merge(m, src) +} +func (m *RegressLog) XXX_Size() int { + return xxx_messageInfo_RegressLog.Size(m) +} +func (m *RegressLog) XXX_DiscardUnknown() { + xxx_messageInfo_RegressLog.DiscardUnknown(m) +} + +var xxx_messageInfo_RegressLog proto.InternalMessageInfo + +func (m *RegressLog) GetRequest() *RegressionRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *RegressLog) GetResponse() *RegressionResponse { + if m != nil { + return m.Response + } + return nil +} + +type PredictLog struct { + Request *PredictRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + Response *PredictResponse `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PredictLog) Reset() { *m = PredictLog{} } +func (m *PredictLog) String() string { return proto.CompactTextString(m) } +func (*PredictLog) ProtoMessage() {} +func (*PredictLog) Descriptor() ([]byte, []int) { + return fileDescriptor_db62cb1da263d301, []int{2} +} + +func (m *PredictLog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PredictLog.Unmarshal(m, b) +} +func (m *PredictLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PredictLog.Marshal(b, m, deterministic) +} +func (m *PredictLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_PredictLog.Merge(m, src) +} +func (m 
*PredictLog) XXX_Size() int { + return xxx_messageInfo_PredictLog.Size(m) +} +func (m *PredictLog) XXX_DiscardUnknown() { + xxx_messageInfo_PredictLog.DiscardUnknown(m) +} + +var xxx_messageInfo_PredictLog proto.InternalMessageInfo + +func (m *PredictLog) GetRequest() *PredictRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *PredictLog) GetResponse() *PredictResponse { + if m != nil { + return m.Response + } + return nil +} + +type MultiInferenceLog struct { + Request *MultiInferenceRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + Response *MultiInferenceResponse `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MultiInferenceLog) Reset() { *m = MultiInferenceLog{} } +func (m *MultiInferenceLog) String() string { return proto.CompactTextString(m) } +func (*MultiInferenceLog) ProtoMessage() {} +func (*MultiInferenceLog) Descriptor() ([]byte, []int) { + return fileDescriptor_db62cb1da263d301, []int{3} +} + +func (m *MultiInferenceLog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MultiInferenceLog.Unmarshal(m, b) +} +func (m *MultiInferenceLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MultiInferenceLog.Marshal(b, m, deterministic) +} +func (m *MultiInferenceLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_MultiInferenceLog.Merge(m, src) +} +func (m *MultiInferenceLog) XXX_Size() int { + return xxx_messageInfo_MultiInferenceLog.Size(m) +} +func (m *MultiInferenceLog) XXX_DiscardUnknown() { + xxx_messageInfo_MultiInferenceLog.DiscardUnknown(m) +} + +var xxx_messageInfo_MultiInferenceLog proto.InternalMessageInfo + +func (m *MultiInferenceLog) GetRequest() *MultiInferenceRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *MultiInferenceLog) GetResponse() *MultiInferenceResponse { + 
if m != nil { + return m.Response + } + return nil +} + +type SessionRunLog struct { + Request *SessionRunRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + Response *SessionRunResponse `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionRunLog) Reset() { *m = SessionRunLog{} } +func (m *SessionRunLog) String() string { return proto.CompactTextString(m) } +func (*SessionRunLog) ProtoMessage() {} +func (*SessionRunLog) Descriptor() ([]byte, []int) { + return fileDescriptor_db62cb1da263d301, []int{4} +} + +func (m *SessionRunLog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SessionRunLog.Unmarshal(m, b) +} +func (m *SessionRunLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SessionRunLog.Marshal(b, m, deterministic) +} +func (m *SessionRunLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionRunLog.Merge(m, src) +} +func (m *SessionRunLog) XXX_Size() int { + return xxx_messageInfo_SessionRunLog.Size(m) +} +func (m *SessionRunLog) XXX_DiscardUnknown() { + xxx_messageInfo_SessionRunLog.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionRunLog proto.InternalMessageInfo + +func (m *SessionRunLog) GetRequest() *SessionRunRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *SessionRunLog) GetResponse() *SessionRunResponse { + if m != nil { + return m.Response + } + return nil +} + +// Logged model inference request. 
+type PredictionLog struct { + LogMetadata *LogMetadata `protobuf:"bytes,1,opt,name=log_metadata,json=logMetadata,proto3" json:"log_metadata,omitempty"` + // Types that are valid to be assigned to LogType: + // *PredictionLog_ClassifyLog + // *PredictionLog_RegressLog + // *PredictionLog_PredictLog + // *PredictionLog_MultiInferenceLog + // *PredictionLog_SessionRunLog + LogType isPredictionLog_LogType `protobuf_oneof:"log_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PredictionLog) Reset() { *m = PredictionLog{} } +func (m *PredictionLog) String() string { return proto.CompactTextString(m) } +func (*PredictionLog) ProtoMessage() {} +func (*PredictionLog) Descriptor() ([]byte, []int) { + return fileDescriptor_db62cb1da263d301, []int{5} +} + +func (m *PredictionLog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PredictionLog.Unmarshal(m, b) +} +func (m *PredictionLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PredictionLog.Marshal(b, m, deterministic) +} +func (m *PredictionLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_PredictionLog.Merge(m, src) +} +func (m *PredictionLog) XXX_Size() int { + return xxx_messageInfo_PredictionLog.Size(m) +} +func (m *PredictionLog) XXX_DiscardUnknown() { + xxx_messageInfo_PredictionLog.DiscardUnknown(m) +} + +var xxx_messageInfo_PredictionLog proto.InternalMessageInfo + +func (m *PredictionLog) GetLogMetadata() *LogMetadata { + if m != nil { + return m.LogMetadata + } + return nil +} + +type isPredictionLog_LogType interface { + isPredictionLog_LogType() +} + +type PredictionLog_ClassifyLog struct { + ClassifyLog *ClassifyLog `protobuf:"bytes,2,opt,name=classify_log,json=classifyLog,proto3,oneof"` +} + +type PredictionLog_RegressLog struct { + RegressLog *RegressLog `protobuf:"bytes,3,opt,name=regress_log,json=regressLog,proto3,oneof"` +} + +type PredictionLog_PredictLog struct { + 
PredictLog *PredictLog `protobuf:"bytes,6,opt,name=predict_log,json=predictLog,proto3,oneof"` +} + +type PredictionLog_MultiInferenceLog struct { + MultiInferenceLog *MultiInferenceLog `protobuf:"bytes,4,opt,name=multi_inference_log,json=multiInferenceLog,proto3,oneof"` +} + +type PredictionLog_SessionRunLog struct { + SessionRunLog *SessionRunLog `protobuf:"bytes,5,opt,name=session_run_log,json=sessionRunLog,proto3,oneof"` +} + +func (*PredictionLog_ClassifyLog) isPredictionLog_LogType() {} + +func (*PredictionLog_RegressLog) isPredictionLog_LogType() {} + +func (*PredictionLog_PredictLog) isPredictionLog_LogType() {} + +func (*PredictionLog_MultiInferenceLog) isPredictionLog_LogType() {} + +func (*PredictionLog_SessionRunLog) isPredictionLog_LogType() {} + +func (m *PredictionLog) GetLogType() isPredictionLog_LogType { + if m != nil { + return m.LogType + } + return nil +} + +func (m *PredictionLog) GetClassifyLog() *ClassifyLog { + if x, ok := m.GetLogType().(*PredictionLog_ClassifyLog); ok { + return x.ClassifyLog + } + return nil +} + +func (m *PredictionLog) GetRegressLog() *RegressLog { + if x, ok := m.GetLogType().(*PredictionLog_RegressLog); ok { + return x.RegressLog + } + return nil +} + +func (m *PredictionLog) GetPredictLog() *PredictLog { + if x, ok := m.GetLogType().(*PredictionLog_PredictLog); ok { + return x.PredictLog + } + return nil +} + +func (m *PredictionLog) GetMultiInferenceLog() *MultiInferenceLog { + if x, ok := m.GetLogType().(*PredictionLog_MultiInferenceLog); ok { + return x.MultiInferenceLog + } + return nil +} + +func (m *PredictionLog) GetSessionRunLog() *SessionRunLog { + if x, ok := m.GetLogType().(*PredictionLog_SessionRunLog); ok { + return x.SessionRunLog + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*PredictionLog) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*PredictionLog_ClassifyLog)(nil), + (*PredictionLog_RegressLog)(nil), + (*PredictionLog_PredictLog)(nil), + (*PredictionLog_MultiInferenceLog)(nil), + (*PredictionLog_SessionRunLog)(nil), + } +} + +func init() { + proto.RegisterType((*ClassifyLog)(nil), "tensorflow.serving.ClassifyLog") + proto.RegisterType((*RegressLog)(nil), "tensorflow.serving.RegressLog") + proto.RegisterType((*PredictLog)(nil), "tensorflow.serving.PredictLog") + proto.RegisterType((*MultiInferenceLog)(nil), "tensorflow.serving.MultiInferenceLog") + proto.RegisterType((*SessionRunLog)(nil), "tensorflow.serving.SessionRunLog") + proto.RegisterType((*PredictionLog)(nil), "tensorflow.serving.PredictionLog") +} + +func init() { + proto.RegisterFile("tensorflow_serving/apis/prediction_log.proto", fileDescriptor_db62cb1da263d301) +} + +var fileDescriptor_db62cb1da263d301 = []byte{ + // 489 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0xbb, 0x04, 0x4a, 0x35, 0x6e, 0x84, 0x6a, 0x2e, 0x51, 0x0f, 0x14, 0x82, 0x0a, 0x05, + 0x41, 0x22, 0xc1, 0x15, 0xa9, 0xaa, 0x83, 0x50, 0x11, 0xad, 0x54, 0x2d, 0x07, 0x8e, 0x96, 0x71, + 0x37, 0xab, 0x95, 0x9c, 0x5d, 0xb3, 0xbb, 0x06, 0xe5, 0x19, 0x38, 0x21, 0x71, 0xe2, 0xc4, 0x2b, + 0xf1, 0x36, 0x1c, 0xab, 0xfd, 0x13, 0x3b, 0xcd, 0xc6, 0x6e, 0xd4, 0x63, 0xac, 0x6f, 0x7e, 0xfb, + 0x65, 0xbe, 0x99, 0x81, 0x57, 0x9a, 0x70, 0x25, 0xe4, 0xb4, 0x10, 0x3f, 0x52, 0x45, 0xe4, 0x77, + 0xc6, 0xe9, 0x38, 0x2b, 0x99, 0x1a, 0x97, 0x92, 0x5c, 0xb2, 0x5c, 0x33, 0xc1, 0xd3, 0x42, 0xd0, + 0x51, 0x29, 0x85, 0x16, 0x71, 0xdc, 0xa8, 0x47, 0x5e, 0xbd, 0xdf, 0x4a, 0xc8, 0x8b, 0x4c, 0x29, + 0x36, 0x65, 0x79, 0x66, 0x28, 0x8e, 0xb0, 0xff, 0xbc, 0x4d, 0xcd, 0xf8, 0x94, 0x48, 0xc2, 0x73, + 0xe2, 0x85, 0x87, 0x37, 0x18, 0xf3, 0xb2, 0xa3, 0x36, 0x99, 0x24, 0x54, 0x12, 0xa5, 0x9a, 0x97, + 0x5f, 0xb7, 0x29, 0x95, 
0x93, 0xb9, 0x8f, 0x9d, 0xef, 0xe7, 0x42, 0x92, 0x71, 0x21, 0x28, 0x65, + 0xdc, 0x77, 0x64, 0xf8, 0x07, 0x41, 0x34, 0x71, 0x7f, 0x74, 0x7e, 0x26, 0x68, 0x3c, 0x81, 0xfb, + 0x92, 0x7c, 0xab, 0x88, 0xd2, 0x03, 0xf4, 0x18, 0x1d, 0x45, 0x6f, 0x5e, 0x8c, 0xc2, 0x9e, 0x8d, + 0x26, 0xd7, 0x5a, 0x83, 0x5d, 0x01, 0x5e, 0x54, 0xc6, 0x1f, 0x60, 0x47, 0x12, 0x55, 0x0a, 0xae, + 0xc8, 0xe0, 0x8e, 0xa5, 0xbc, 0xdc, 0x84, 0xe2, 0x2a, 0x70, 0x5d, 0x3b, 0xfc, 0x85, 0x00, 0xb0, + 0xeb, 0x83, 0xf1, 0x76, 0xbc, 0xea, 0xed, 0x70, 0x1d, 0x15, 0xd7, 0x8d, 0x0b, 0x7c, 0x25, 0x81, + 0xaf, 0x67, 0x37, 0x11, 0x02, 0x4f, 0x3f, 0x11, 0xc0, 0x85, 0x8b, 0xd0, 0x78, 0x7a, 0xb7, 0xea, + 0x69, 0xb8, 0x8e, 0xe8, 0x0b, 0x02, 0x43, 0xc7, 0x81, 0xa1, 0xa7, 0x9d, 0xe5, 0x81, 0x9b, 0xbf, + 0x08, 0xf6, 0xce, 0xab, 0x42, 0xb3, 0x8f, 0x8b, 0xf1, 0xdb, 0x3c, 0xc4, 0xeb, 0x75, 0xb7, 0x0d, + 0x71, 0x95, 0x12, 0x58, 0xfc, 0x8d, 0xa0, 0xff, 0xd9, 0xb7, 0xb3, 0xe2, 0x9b, 0xe7, 0xd8, 0xd4, + 0xdc, 0x36, 0xc7, 0x65, 0x42, 0x60, 0xeb, 0x5f, 0x0f, 0xfa, 0x17, 0xf5, 0x8d, 0x30, 0xb6, 0x12, + 0xd8, 0x2d, 0x04, 0x4d, 0x67, 0x44, 0x67, 0x97, 0x99, 0xce, 0xbc, 0xb7, 0x83, 0x75, 0xe4, 0x33, + 0x41, 0xcf, 0xbd, 0x0c, 0x47, 0x45, 0xf3, 0x23, 0x7e, 0x0f, 0xbb, 0xfe, 0x6c, 0xcc, 0xcd, 0xd9, + 0xf1, 0xee, 0x0e, 0x3a, 0xa6, 0xdf, 0x6c, 0xdd, 0xe9, 0x16, 0x8e, 0xf2, 0xa5, 0x25, 0x3c, 0x81, + 0xc8, 0xaf, 0xbf, 0x85, 0xf4, 0x2c, 0xe4, 0x51, 0xc7, 0xa8, 0x3a, 0x06, 0xc8, 0x66, 0x57, 0x4e, + 0x20, 0xf2, 0x87, 0xc6, 0x22, 0xb6, 0xdb, 0x11, 0xcd, 0x30, 0x1b, 0x44, 0xd9, 0x8c, 0xf6, 0x17, + 0x78, 0x38, 0x33, 0xe1, 0xa6, 0xf5, 0x69, 0xb3, 0xa8, 0xbb, 0xed, 0x91, 0x05, 0x93, 0x78, 0xba, + 0x85, 0xf7, 0x66, 0xc1, 0x78, 0x7e, 0x82, 0x07, 0x8b, 0x9b, 0x25, 0x2b, 0x7b, 0x9e, 0x07, 0xf7, + 0x2c, 0xf4, 0x49, 0x77, 0x8a, 0x0e, 0xd8, 0x57, 0xcb, 0x1f, 0x12, 0x80, 0x1d, 0x93, 0x9a, 0x9e, + 0x97, 0x24, 0xe9, 0xfd, 0x47, 0xe8, 0xeb, 0xb6, 0x3d, 0x6c, 0x6f, 0xaf, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x68, 0x7a, 0x67, 0x00, 0x1a, 0x06, 0x00, 0x00, +} diff 
--git a/executor/proto/tensorflow/serving/prediction_service.pb.go b/executor/proto/tensorflow/serving/prediction_service.pb.go new file mode 100644 index 0000000000..4e9e626747 --- /dev/null +++ b/executor/proto/tensorflow/serving/prediction_service.pb.go @@ -0,0 +1,286 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/apis/prediction_service.proto + +package serving + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("tensorflow_serving/apis/prediction_service.proto", fileDescriptor_6f2588d3ed9ea15a) +} + +var fileDescriptor_6f2588d3ed9ea15a = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcf, 0x4b, 0xfb, 0x40, + 0x10, 0xc5, 0x29, 0x85, 0x6f, 0xbf, 0xec, 0x41, 0x74, 0x8f, 0x39, 0x2a, 0xf5, 0x67, 0x49, 0x45, + 0xff, 0x03, 0x7b, 0x10, 0x0f, 0x01, 0x89, 0x17, 0x6f, 0x61, 0x4d, 0x26, 0x61, 0x21, 0xd9, 0x8d, + 0x3b, 0x53, 0xc5, 0xff, 0xdc, 0x83, 0x07, 0xa9, 0x3b, 0x1b, 0x69, 0x4d, 0xda, 0x5e, 0x93, 0xcf, + 0x7b, 0x33, 0xef, 0xed, 0x88, 0x6b, 0x02, 0x83, 0xd6, 0x95, 0xb5, 0x7d, 0xcf, 0x10, 0xdc, 0x9b, + 0x36, 0xd5, 0x5c, 0xb5, 0x1a, 0xe7, 0xad, 0x83, 0x42, 0xe7, 0xa4, 0xad, 0xf1, 0xdf, 0x73, 0x88, + 0x5b, 0x67, 0xc9, 0x4a, 0xf9, 0xab, 0x88, 0x59, 0x11, 0xcd, 0x86, 0x5c, 0xf2, 0x5a, 0x21, 0xea, + 0x52, 0xe7, 0x6a, 0xe5, 0xe4, 0x1d, 0xa2, 0xc1, 0x99, 0x15, 0x50, 0xd6, 0xd8, 0x02, 0xea, 0xac, + 0x01, 0x52, 0x85, 0x22, 0xc5, 0x8a, 0xb3, 0x21, 0x85, 0x36, 0x25, 0x38, 0x30, 0x61, 0xb9, 0x68, + 0xba, 0x23, 0x0e, 0x63, 0xe7, 0x43, 0x98, 0x83, 0xca, 0x01, 0x62, 0xb7, 0xeb, 0xcd, 0xd7, 0x58, + 0x1c, 0x3d, 0x76, 0x55, 0x3c, 0xf9, 0x26, 0xa4, 0x12, 0xff, 0x17, 0x3e, 0xd9, 0x87, 0xbc, 0x88, + 0xff, 0x16, 0x12, 0x2f, 0xd6, 0x72, 0xa7, 0xf0, 0xba, 0x04, 0xa4, 0xe8, 0x72, 0x1f, 0x14, 0x5b, + 0x6b, 0x10, 0xe4, 0xb3, 0x98, 0xa4, 0x7e, 0x19, 0x39, 0xed, 0x93, 0xa5, 0xdd, 0xa6, 0xc1, 0xfd, + 0x74, 0x17, 0xc6, 0xce, 0xa9, 0x98, 0x70, 0x22, 0x79, 0xdc, 0x27, 0xe1, 0x9f, 0xc1, 0xf6, 0x64, + 0x2b, 0xc3, 0x9e, 0x95, 0x38, 0x48, 0x96, 0x35, 0xe9, 0x87, 0xf0, 0x1e, 0xfd, 0xb5, 0xac, 0x33, + 0x5b, 0x6b, 0xd9, 0x44, 0x79, 0x50, 0x23, 0x0e, 0xef, 0x81, 0x92, 0xd5, 0x91, 0x24, 0x7c, 0x23, + 0xf2, 0xaa, 0x4f, 0xbf, 0x49, 0x85, 0x61, 0xb3, 0xfd, 0x60, 0x3f, 0xee, 0x6e, 0xfc, 0x39, 0x1a, + 0xbd, 0xfc, 0xfb, 0x39, 0x85, 
0xdb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0x88, 0x87, 0xa4, + 0x2c, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// PredictionServiceClient is the client API for PredictionService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type PredictionServiceClient interface { + // Classify. + Classify(ctx context.Context, in *ClassificationRequest, opts ...grpc.CallOption) (*ClassificationResponse, error) + // Regress. + Regress(ctx context.Context, in *RegressionRequest, opts ...grpc.CallOption) (*RegressionResponse, error) + // Predict -- provides access to loaded TensorFlow model. + Predict(ctx context.Context, in *PredictRequest, opts ...grpc.CallOption) (*PredictResponse, error) + // MultiInference API for multi-headed models. + MultiInference(ctx context.Context, in *MultiInferenceRequest, opts ...grpc.CallOption) (*MultiInferenceResponse, error) + // GetModelMetadata - provides access to metadata for loaded models. + GetModelMetadata(ctx context.Context, in *GetModelMetadataRequest, opts ...grpc.CallOption) (*GetModelMetadataResponse, error) +} + +type predictionServiceClient struct { + cc *grpc.ClientConn +} + +func NewPredictionServiceClient(cc *grpc.ClientConn) PredictionServiceClient { + return &predictionServiceClient{cc} +} + +func (c *predictionServiceClient) Classify(ctx context.Context, in *ClassificationRequest, opts ...grpc.CallOption) (*ClassificationResponse, error) { + out := new(ClassificationResponse) + err := c.cc.Invoke(ctx, "/tensorflow.serving.PredictionService/Classify", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *predictionServiceClient) Regress(ctx context.Context, in *RegressionRequest, opts ...grpc.CallOption) (*RegressionResponse, error) { + out := new(RegressionResponse) + err := c.cc.Invoke(ctx, "/tensorflow.serving.PredictionService/Regress", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *predictionServiceClient) Predict(ctx context.Context, in *PredictRequest, opts ...grpc.CallOption) (*PredictResponse, error) { + out := new(PredictResponse) + err := c.cc.Invoke(ctx, "/tensorflow.serving.PredictionService/Predict", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *predictionServiceClient) MultiInference(ctx context.Context, in *MultiInferenceRequest, opts ...grpc.CallOption) (*MultiInferenceResponse, error) { + out := new(MultiInferenceResponse) + err := c.cc.Invoke(ctx, "/tensorflow.serving.PredictionService/MultiInference", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *predictionServiceClient) GetModelMetadata(ctx context.Context, in *GetModelMetadataRequest, opts ...grpc.CallOption) (*GetModelMetadataResponse, error) { + out := new(GetModelMetadataResponse) + err := c.cc.Invoke(ctx, "/tensorflow.serving.PredictionService/GetModelMetadata", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PredictionServiceServer is the server API for PredictionService service. +type PredictionServiceServer interface { + // Classify. + Classify(context.Context, *ClassificationRequest) (*ClassificationResponse, error) + // Regress. + Regress(context.Context, *RegressionRequest) (*RegressionResponse, error) + // Predict -- provides access to loaded TensorFlow model. + Predict(context.Context, *PredictRequest) (*PredictResponse, error) + // MultiInference API for multi-headed models. 
+ MultiInference(context.Context, *MultiInferenceRequest) (*MultiInferenceResponse, error) + // GetModelMetadata - provides access to metadata for loaded models. + GetModelMetadata(context.Context, *GetModelMetadataRequest) (*GetModelMetadataResponse, error) +} + +// UnimplementedPredictionServiceServer can be embedded to have forward compatible implementations. +type UnimplementedPredictionServiceServer struct { +} + +func (*UnimplementedPredictionServiceServer) Classify(ctx context.Context, req *ClassificationRequest) (*ClassificationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Classify not implemented") +} +func (*UnimplementedPredictionServiceServer) Regress(ctx context.Context, req *RegressionRequest) (*RegressionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Regress not implemented") +} +func (*UnimplementedPredictionServiceServer) Predict(ctx context.Context, req *PredictRequest) (*PredictResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Predict not implemented") +} +func (*UnimplementedPredictionServiceServer) MultiInference(ctx context.Context, req *MultiInferenceRequest) (*MultiInferenceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiInference not implemented") +} +func (*UnimplementedPredictionServiceServer) GetModelMetadata(ctx context.Context, req *GetModelMetadataRequest) (*GetModelMetadataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetModelMetadata not implemented") +} + +func RegisterPredictionServiceServer(s *grpc.Server, srv PredictionServiceServer) { + s.RegisterService(&_PredictionService_serviceDesc, srv) +} + +func _PredictionService_Classify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClassificationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + 
return srv.(PredictionServiceServer).Classify(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tensorflow.serving.PredictionService/Classify", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PredictionServiceServer).Classify(ctx, req.(*ClassificationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PredictionService_Regress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegressionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PredictionServiceServer).Regress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tensorflow.serving.PredictionService/Regress", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PredictionServiceServer).Regress(ctx, req.(*RegressionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PredictionService_Predict_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PredictRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PredictionServiceServer).Predict(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tensorflow.serving.PredictionService/Predict", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PredictionServiceServer).Predict(ctx, req.(*PredictRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PredictionService_MultiInference_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MultiInferenceRequest) + if err := dec(in); err != nil { + return nil, err + } + 
if interceptor == nil { + return srv.(PredictionServiceServer).MultiInference(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tensorflow.serving.PredictionService/MultiInference", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PredictionServiceServer).MultiInference(ctx, req.(*MultiInferenceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PredictionService_GetModelMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetModelMetadataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PredictionServiceServer).GetModelMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tensorflow.serving.PredictionService/GetModelMetadata", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PredictionServiceServer).GetModelMetadata(ctx, req.(*GetModelMetadataRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _PredictionService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tensorflow.serving.PredictionService", + HandlerType: (*PredictionServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Classify", + Handler: _PredictionService_Classify_Handler, + }, + { + MethodName: "Regress", + Handler: _PredictionService_Regress_Handler, + }, + { + MethodName: "Predict", + Handler: _PredictionService_Predict_Handler, + }, + { + MethodName: "MultiInference", + Handler: _PredictionService_MultiInference_Handler, + }, + { + MethodName: "GetModelMetadata", + Handler: _PredictionService_GetModelMetadata_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tensorflow_serving/apis/prediction_service.proto", +} diff --git a/executor/proto/tensorflow/serving/regression.pb.go 
b/executor/proto/tensorflow/serving/regression.pb.go new file mode 100644 index 0000000000..e1750c6acd --- /dev/null +++ b/executor/proto/tensorflow/serving/regression.pb.go @@ -0,0 +1,232 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/apis/regression.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Regression result for a single item (tensorflow.Example). +type Regression struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Regression) Reset() { *m = Regression{} } +func (m *Regression) String() string { return proto.CompactTextString(m) } +func (*Regression) ProtoMessage() {} +func (*Regression) Descriptor() ([]byte, []int) { + return fileDescriptor_d9691657f3048b95, []int{0} +} + +func (m *Regression) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Regression.Unmarshal(m, b) +} +func (m *Regression) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Regression.Marshal(b, m, deterministic) +} +func (m *Regression) XXX_Merge(src proto.Message) { + xxx_messageInfo_Regression.Merge(m, src) +} +func (m *Regression) XXX_Size() int { + return xxx_messageInfo_Regression.Size(m) +} +func (m *Regression) XXX_DiscardUnknown() { + xxx_messageInfo_Regression.DiscardUnknown(m) +} + +var xxx_messageInfo_Regression 
proto.InternalMessageInfo + +func (m *Regression) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Contains one result per input example, in the same order as the input in +// RegressionRequest. +type RegressionResult struct { + Regressions []*Regression `protobuf:"bytes,1,rep,name=regressions,proto3" json:"regressions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegressionResult) Reset() { *m = RegressionResult{} } +func (m *RegressionResult) String() string { return proto.CompactTextString(m) } +func (*RegressionResult) ProtoMessage() {} +func (*RegressionResult) Descriptor() ([]byte, []int) { + return fileDescriptor_d9691657f3048b95, []int{1} +} + +func (m *RegressionResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RegressionResult.Unmarshal(m, b) +} +func (m *RegressionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RegressionResult.Marshal(b, m, deterministic) +} +func (m *RegressionResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegressionResult.Merge(m, src) +} +func (m *RegressionResult) XXX_Size() int { + return xxx_messageInfo_RegressionResult.Size(m) +} +func (m *RegressionResult) XXX_DiscardUnknown() { + xxx_messageInfo_RegressionResult.DiscardUnknown(m) +} + +var xxx_messageInfo_RegressionResult proto.InternalMessageInfo + +func (m *RegressionResult) GetRegressions() []*Regression { + if m != nil { + return m.Regressions + } + return nil +} + +type RegressionRequest struct { + // Model Specification. If version is not specified, will use the latest + // (numerical) version. + ModelSpec *ModelSpec `protobuf:"bytes,1,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // Input data. 
+ Input *Input `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegressionRequest) Reset() { *m = RegressionRequest{} } +func (m *RegressionRequest) String() string { return proto.CompactTextString(m) } +func (*RegressionRequest) ProtoMessage() {} +func (*RegressionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_d9691657f3048b95, []int{2} +} + +func (m *RegressionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RegressionRequest.Unmarshal(m, b) +} +func (m *RegressionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RegressionRequest.Marshal(b, m, deterministic) +} +func (m *RegressionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegressionRequest.Merge(m, src) +} +func (m *RegressionRequest) XXX_Size() int { + return xxx_messageInfo_RegressionRequest.Size(m) +} +func (m *RegressionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegressionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RegressionRequest proto.InternalMessageInfo + +func (m *RegressionRequest) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *RegressionRequest) GetInput() *Input { + if m != nil { + return m.Input + } + return nil +} + +type RegressionResponse struct { + // Effective Model Specification used for regression. 
+ ModelSpec *ModelSpec `protobuf:"bytes,2,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + Result *RegressionResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegressionResponse) Reset() { *m = RegressionResponse{} } +func (m *RegressionResponse) String() string { return proto.CompactTextString(m) } +func (*RegressionResponse) ProtoMessage() {} +func (*RegressionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_d9691657f3048b95, []int{3} +} + +func (m *RegressionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RegressionResponse.Unmarshal(m, b) +} +func (m *RegressionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RegressionResponse.Marshal(b, m, deterministic) +} +func (m *RegressionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegressionResponse.Merge(m, src) +} +func (m *RegressionResponse) XXX_Size() int { + return xxx_messageInfo_RegressionResponse.Size(m) +} +func (m *RegressionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RegressionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RegressionResponse proto.InternalMessageInfo + +func (m *RegressionResponse) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *RegressionResponse) GetResult() *RegressionResult { + if m != nil { + return m.Result + } + return nil +} + +func init() { + proto.RegisterType((*Regression)(nil), "tensorflow.serving.Regression") + proto.RegisterType((*RegressionResult)(nil), "tensorflow.serving.RegressionResult") + proto.RegisterType((*RegressionRequest)(nil), "tensorflow.serving.RegressionRequest") + proto.RegisterType((*RegressionResponse)(nil), "tensorflow.serving.RegressionResponse") +} + +func init() { + 
proto.RegisterFile("tensorflow_serving/apis/regression.proto", fileDescriptor_d9691657f3048b95) +} + +var fileDescriptor_d9691657f3048b95 = []byte{ + // 260 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0xb1, 0x4e, 0xc3, 0x30, + 0x10, 0x40, 0xe5, 0x54, 0xad, 0xc4, 0x65, 0x01, 0x8b, 0x21, 0x20, 0x81, 0x2a, 0xc3, 0x90, 0x29, + 0x91, 0xca, 0xda, 0x01, 0xb1, 0x31, 0xb0, 0x18, 0xf6, 0xaa, 0x94, 0xa3, 0x8a, 0xe4, 0xda, 0xc6, + 0x67, 0x97, 0x9d, 0x2f, 0xe0, 0x73, 0x19, 0x51, 0xed, 0x40, 0x02, 0xb4, 0x42, 0xdd, 0x12, 0xe9, + 0xbd, 0xf3, 0xf3, 0x19, 0x4a, 0x8f, 0x9a, 0x8c, 0x7b, 0x56, 0xe6, 0x75, 0x46, 0xe8, 0xd6, 0x8d, + 0x5e, 0xd6, 0x73, 0xdb, 0x50, 0xed, 0x70, 0xe9, 0x90, 0xa8, 0x31, 0xba, 0xb2, 0xce, 0x78, 0xc3, + 0x79, 0x47, 0x56, 0x2d, 0x79, 0x7a, 0xb1, 0xcb, 0x6e, 0xb4, 0x0d, 0x3e, 0x89, 0xbb, 0xa1, 0x95, + 0x79, 0x42, 0x95, 0x20, 0x21, 0x00, 0xe4, 0xf7, 0x89, 0xfc, 0x18, 0x86, 0xeb, 0xb9, 0x0a, 0x58, + 0xb0, 0x31, 0x2b, 0x33, 0x99, 0x7e, 0xc4, 0x03, 0x1c, 0x76, 0x8c, 0x44, 0x0a, 0xca, 0xf3, 0x6b, + 0xc8, 0xbb, 0x52, 0x2a, 0xd8, 0x78, 0x50, 0xe6, 0x93, 0xf3, 0xea, 0x6f, 0x6b, 0xd5, 0x53, 0xfb, + 0x8a, 0x78, 0x63, 0x70, 0xd4, 0x1f, 0xfb, 0x12, 0x90, 0x3c, 0x9f, 0x02, 0xc4, 0xbc, 0x19, 0x59, + 0x5c, 0xc4, 0x8c, 0x7c, 0x72, 0xb6, 0x6d, 0xec, 0xdd, 0x86, 0xba, 0xb7, 0xb8, 0x90, 0x07, 0xab, + 0xaf, 0x4f, 0x5e, 0xc3, 0x30, 0x6e, 0xa0, 0xc8, 0xa2, 0x78, 0xb2, 0x4d, 0xbc, 0xdd, 0x00, 0x32, + 0x71, 0xe2, 0x9d, 0x01, 0xff, 0x71, 0x37, 0x6b, 0x34, 0xe1, 0xaf, 0x8a, 0x6c, 0xcf, 0x8a, 0x29, + 0x8c, 0x5c, 0xdc, 0x52, 0xdb, 0x7f, 0xf9, 0xcf, 0x5a, 0x22, 0x2b, 0x5b, 0xe7, 0x66, 0xf0, 0xc1, + 0xd8, 0xe3, 0x28, 0xbe, 0xce, 0xd5, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd2, 0xd1, 0x6c, 0x2e, + 0x27, 0x02, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/session_service.pb.go b/executor/proto/tensorflow/serving/session_service.pb.go new file mode 100644 index 0000000000..7ecde057d3 --- /dev/null +++ 
b/executor/proto/tensorflow/serving/session_service.pb.go @@ -0,0 +1,282 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/apis/session_service.proto + +package serving + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + protobuf "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type SessionRunRequest struct { + // Model Specification. If version is not specified, will use the latest + // (numerical) version. + ModelSpec *ModelSpec `protobuf:"bytes,1,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // Tensors to be fed in the step. Each feed is a named tensor. + Feed []*protobuf.NamedTensorProto `protobuf:"bytes,2,rep,name=feed,proto3" json:"feed,omitempty"` + // Fetches. A list of tensor names. The caller expects a tensor to + // be returned for each fetch[i] (see RunResponse.tensor). The + // order of specified fetches does not change the execution order. + Fetch []string `protobuf:"bytes,3,rep,name=fetch,proto3" json:"fetch,omitempty"` + // Target Nodes. A list of node names. The named nodes will be run + // to but their outputs will not be fetched. + Target []string `protobuf:"bytes,4,rep,name=target,proto3" json:"target,omitempty"` + // Options for the run call. 
**Currently ignored.** + Options *protobuf.RunOptions `protobuf:"bytes,5,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionRunRequest) Reset() { *m = SessionRunRequest{} } +func (m *SessionRunRequest) String() string { return proto.CompactTextString(m) } +func (*SessionRunRequest) ProtoMessage() {} +func (*SessionRunRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b6b0c7cbabd9081a, []int{0} +} + +func (m *SessionRunRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SessionRunRequest.Unmarshal(m, b) +} +func (m *SessionRunRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SessionRunRequest.Marshal(b, m, deterministic) +} +func (m *SessionRunRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionRunRequest.Merge(m, src) +} +func (m *SessionRunRequest) XXX_Size() int { + return xxx_messageInfo_SessionRunRequest.Size(m) +} +func (m *SessionRunRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SessionRunRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionRunRequest proto.InternalMessageInfo + +func (m *SessionRunRequest) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *SessionRunRequest) GetFeed() []*protobuf.NamedTensorProto { + if m != nil { + return m.Feed + } + return nil +} + +func (m *SessionRunRequest) GetFetch() []string { + if m != nil { + return m.Fetch + } + return nil +} + +func (m *SessionRunRequest) GetTarget() []string { + if m != nil { + return m.Target + } + return nil +} + +func (m *SessionRunRequest) GetOptions() *protobuf.RunOptions { + if m != nil { + return m.Options + } + return nil +} + +type SessionRunResponse struct { + // Effective Model Specification used for session run. 
+ ModelSpec *ModelSpec `protobuf:"bytes,3,opt,name=model_spec,json=modelSpec,proto3" json:"model_spec,omitempty"` + // NOTE: The order of the returned tensors may or may not match + // the fetch order specified in RunRequest. + Tensor []*protobuf.NamedTensorProto `protobuf:"bytes,1,rep,name=tensor,proto3" json:"tensor,omitempty"` + // Returned metadata if requested in the options. + Metadata *protobuf.RunMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionRunResponse) Reset() { *m = SessionRunResponse{} } +func (m *SessionRunResponse) String() string { return proto.CompactTextString(m) } +func (*SessionRunResponse) ProtoMessage() {} +func (*SessionRunResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b6b0c7cbabd9081a, []int{1} +} + +func (m *SessionRunResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SessionRunResponse.Unmarshal(m, b) +} +func (m *SessionRunResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SessionRunResponse.Marshal(b, m, deterministic) +} +func (m *SessionRunResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionRunResponse.Merge(m, src) +} +func (m *SessionRunResponse) XXX_Size() int { + return xxx_messageInfo_SessionRunResponse.Size(m) +} +func (m *SessionRunResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SessionRunResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionRunResponse proto.InternalMessageInfo + +func (m *SessionRunResponse) GetModelSpec() *ModelSpec { + if m != nil { + return m.ModelSpec + } + return nil +} + +func (m *SessionRunResponse) GetTensor() []*protobuf.NamedTensorProto { + if m != nil { + return m.Tensor + } + return nil +} + +func (m *SessionRunResponse) GetMetadata() *protobuf.RunMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func init() { + 
proto.RegisterType((*SessionRunRequest)(nil), "tensorflow.serving.SessionRunRequest") + proto.RegisterType((*SessionRunResponse)(nil), "tensorflow.serving.SessionRunResponse") +} + +func init() { + proto.RegisterFile("tensorflow_serving/apis/session_service.proto", fileDescriptor_b6b0c7cbabd9081a) +} + +var fileDescriptor_b6b0c7cbabd9081a = []byte{ + // 347 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x4e, 0xfa, 0x40, + 0x10, 0xc6, 0xb3, 0x14, 0xf8, 0xff, 0x19, 0x12, 0x13, 0x37, 0x06, 0x37, 0x44, 0x13, 0x82, 0xc1, + 0x90, 0x18, 0x5b, 0x02, 0x1e, 0x3d, 0x79, 0x47, 0xcd, 0xe2, 0xcd, 0x03, 0x29, 0xed, 0x14, 0x9b, + 0xd0, 0xdd, 0xda, 0xdd, 0xea, 0xd3, 0xf9, 0x34, 0xbe, 0x84, 0x47, 0xc3, 0xee, 0x22, 0x04, 0x24, + 0xea, 0xad, 0x33, 0xf3, 0x9b, 0x7e, 0xdf, 0xd7, 0x0e, 0x5c, 0x6a, 0x14, 0x4a, 0x16, 0xc9, 0x42, + 0xbe, 0x4e, 0x15, 0x16, 0x2f, 0xa9, 0x98, 0x07, 0x61, 0x9e, 0xaa, 0x40, 0xa1, 0x52, 0xa9, 0x14, + 0xb6, 0x19, 0xa1, 0x9f, 0x17, 0x52, 0x4b, 0x4a, 0xd7, 0xb8, 0xef, 0xf0, 0xf6, 0xd9, 0xbe, 0x57, + 0x64, 0x32, 0xc6, 0x85, 0x5d, 0x6c, 0xf7, 0xd6, 0x50, 0x10, 0xc9, 0x02, 0x03, 0xd3, 0x9e, 0x95, + 0x49, 0x10, 0x49, 0x91, 0xa4, 0x73, 0x87, 0x5d, 0xec, 0xc5, 0x44, 0x98, 0x61, 0x3c, 0xb5, 0x63, + 0x0b, 0x77, 0xdf, 0x09, 0x1c, 0x4e, 0xac, 0x4d, 0x5e, 0x0a, 0x8e, 0xcf, 0x25, 0x2a, 0x4d, 0xaf, + 0x01, 0x8c, 0xf0, 0x54, 0xe5, 0x18, 0x31, 0xd2, 0x21, 0xfd, 0xe6, 0xf0, 0xd4, 0xdf, 0xf5, 0xed, + 0x8f, 0x97, 0xd4, 0x24, 0xc7, 0x88, 0x37, 0xb2, 0xd5, 0x23, 0x1d, 0x40, 0x35, 0x41, 0x8c, 0x59, + 0xa5, 0xe3, 0xf5, 0x9b, 0xc3, 0x93, 0xcd, 0xbd, 0xdb, 0xa5, 0x83, 0x07, 0x53, 0xdf, 0x2f, 0xf5, + 0xb9, 0x21, 0xe9, 0x11, 0xd4, 0x12, 0xd4, 0xd1, 0x13, 0xf3, 0x3a, 0x5e, 0xbf, 0xc1, 0x6d, 0x41, + 0x5b, 0x50, 0xd7, 0x61, 0x31, 0x47, 0xcd, 0xaa, 0xa6, 0xed, 0x2a, 0x3a, 0x80, 0x7f, 0x32, 0xd7, + 0xa9, 0x14, 0x8a, 0xd5, 0x8c, 0xb5, 0xd6, 0xa6, 0x04, 0x2f, 0xc5, 0x9d, 0x9d, 0xf2, 0x15, 0xd6, + 0x7d, 0x23, 0x40, 0x37, 0x53, 
0xaa, 0x5c, 0x0a, 0x85, 0x5b, 0x31, 0xbd, 0x3f, 0xc6, 0xbc, 0x82, + 0xba, 0x45, 0x19, 0xf9, 0x45, 0x50, 0xc7, 0xd2, 0x11, 0xfc, 0xcf, 0x50, 0x87, 0x71, 0xa8, 0x43, + 0x56, 0x31, 0x8a, 0xc7, 0x5b, 0xee, 0xc7, 0x6e, 0xcc, 0xbf, 0xc0, 0x61, 0x06, 0x07, 0xce, 0xfe, + 0xc4, 0x9e, 0x12, 0x7d, 0x04, 0x58, 0x07, 0xa2, 0xbd, 0xef, 0x4c, 0xef, 0xfc, 0xd6, 0xf6, 0xf9, + 0x4f, 0x98, 0xfd, 0x2e, 0x37, 0xde, 0x07, 0x21, 0xb3, 0xba, 0x39, 0x90, 0xd1, 0x67, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xe2, 0x1e, 0x34, 0x3d, 0xde, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// SessionServiceClient is the client API for SessionService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SessionServiceClient interface { + // Runs inference of a given model. + SessionRun(ctx context.Context, in *SessionRunRequest, opts ...grpc.CallOption) (*SessionRunResponse, error) +} + +type sessionServiceClient struct { + cc *grpc.ClientConn +} + +func NewSessionServiceClient(cc *grpc.ClientConn) SessionServiceClient { + return &sessionServiceClient{cc} +} + +func (c *sessionServiceClient) SessionRun(ctx context.Context, in *SessionRunRequest, opts ...grpc.CallOption) (*SessionRunResponse, error) { + out := new(SessionRunResponse) + err := c.cc.Invoke(ctx, "/tensorflow.serving.SessionService/SessionRun", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SessionServiceServer is the server API for SessionService service. +type SessionServiceServer interface { + // Runs inference of a given model. 
+ SessionRun(context.Context, *SessionRunRequest) (*SessionRunResponse, error) +} + +// UnimplementedSessionServiceServer can be embedded to have forward compatible implementations. +type UnimplementedSessionServiceServer struct { +} + +func (*UnimplementedSessionServiceServer) SessionRun(ctx context.Context, req *SessionRunRequest) (*SessionRunResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SessionRun not implemented") +} + +func RegisterSessionServiceServer(s *grpc.Server, srv SessionServiceServer) { + s.RegisterService(&_SessionService_serviceDesc, srv) +} + +func _SessionService_SessionRun_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SessionRunRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SessionServiceServer).SessionRun(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tensorflow.serving.SessionService/SessionRun", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SessionServiceServer).SessionRun(ctx, req.(*SessionRunRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SessionService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tensorflow.serving.SessionService", + HandlerType: (*SessionServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SessionRun", + Handler: _SessionService_SessionRun_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tensorflow_serving/apis/session_service.proto", +} diff --git a/executor/proto/tensorflow/serving/ssl_config.pb.go b/executor/proto/tensorflow/serving/ssl_config.pb.go new file mode 100644 index 0000000000..d3505d2a6e --- /dev/null +++ b/executor/proto/tensorflow/serving/ssl_config.pb.go @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: tensorflow_serving/config/ssl_config.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Configuration for a secure gRPC channel +type SSLConfig struct { + // private server key for SSL + ServerKey string `protobuf:"bytes,1,opt,name=server_key,json=serverKey,proto3" json:"server_key,omitempty"` + // public server certificate + ServerCert string `protobuf:"bytes,2,opt,name=server_cert,json=serverCert,proto3" json:"server_cert,omitempty"` + // custom certificate authority + CustomCa string `protobuf:"bytes,3,opt,name=custom_ca,json=customCa,proto3" json:"custom_ca,omitempty"` + // valid client certificate required ? 
+ ClientVerify bool `protobuf:"varint,4,opt,name=client_verify,json=clientVerify,proto3" json:"client_verify,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SSLConfig) Reset() { *m = SSLConfig{} } +func (m *SSLConfig) String() string { return proto.CompactTextString(m) } +func (*SSLConfig) ProtoMessage() {} +func (*SSLConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_4ff8a518289eb8df, []int{0} +} + +func (m *SSLConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SSLConfig.Unmarshal(m, b) +} +func (m *SSLConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SSLConfig.Marshal(b, m, deterministic) +} +func (m *SSLConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_SSLConfig.Merge(m, src) +} +func (m *SSLConfig) XXX_Size() int { + return xxx_messageInfo_SSLConfig.Size(m) +} +func (m *SSLConfig) XXX_DiscardUnknown() { + xxx_messageInfo_SSLConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_SSLConfig proto.InternalMessageInfo + +func (m *SSLConfig) GetServerKey() string { + if m != nil { + return m.ServerKey + } + return "" +} + +func (m *SSLConfig) GetServerCert() string { + if m != nil { + return m.ServerCert + } + return "" +} + +func (m *SSLConfig) GetCustomCa() string { + if m != nil { + return m.CustomCa + } + return "" +} + +func (m *SSLConfig) GetClientVerify() bool { + if m != nil { + return m.ClientVerify + } + return false +} + +func init() { + proto.RegisterType((*SSLConfig)(nil), "tensorflow.serving.SSLConfig") +} + +func init() { + proto.RegisterFile("tensorflow_serving/config/ssl_config.proto", fileDescriptor_4ff8a518289eb8df) +} + +var fileDescriptor_4ff8a518289eb8df = []byte{ + // 187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x8e, 0xb1, 0x0a, 0xc2, 0x30, + 0x14, 0x45, 0x89, 0x15, 0x69, 0xa2, 0x2e, 0x99, 0x02, 0x22, 0x16, 0x5d, 
0x8a, 0x43, 0x3b, 0xf8, + 0x07, 0x76, 0xd4, 0xa9, 0x05, 0xd7, 0x50, 0xc3, 0x6b, 0x09, 0xd6, 0x44, 0x92, 0x58, 0xe9, 0x47, + 0xf8, 0xbf, 0x8e, 0x62, 0x52, 0x70, 0x7b, 0x9c, 0x73, 0x1e, 0x5c, 0xb2, 0x77, 0xa0, 0xac, 0x36, + 0x4d, 0xa7, 0x5f, 0xdc, 0x82, 0xe9, 0xa5, 0x6a, 0x73, 0xa1, 0x55, 0x23, 0xdb, 0xdc, 0xda, 0x8e, + 0x87, 0x33, 0x7b, 0x18, 0xed, 0x34, 0xa5, 0xff, 0x36, 0x1b, 0xdb, 0xed, 0x1b, 0x11, 0x5c, 0x55, + 0xe7, 0xc2, 0x77, 0x74, 0x4d, 0xc8, 0x4f, 0x80, 0xe1, 0x37, 0x18, 0x18, 0x4a, 0x50, 0x8a, 0x4b, + 0x1c, 0xc8, 0x09, 0x06, 0xba, 0x21, 0xf3, 0x51, 0x0b, 0x30, 0x8e, 0x4d, 0xbc, 0x1f, 0x3f, 0x0a, + 0x30, 0x8e, 0xae, 0x08, 0x16, 0x4f, 0xeb, 0xf4, 0x9d, 0x8b, 0x9a, 0x45, 0x5e, 0xc7, 0x01, 0x14, + 0x35, 0xdd, 0x91, 0xa5, 0xe8, 0x24, 0x28, 0xc7, 0x7b, 0x30, 0xb2, 0x19, 0xd8, 0x34, 0x41, 0x69, + 0x5c, 0x2e, 0x02, 0xbc, 0x78, 0x76, 0x8c, 0x3e, 0x08, 0x5d, 0x67, 0x7e, 0xef, 0xe1, 0x1b, 0x00, + 0x00, 0xff, 0xff, 0xbc, 0x39, 0x10, 0x99, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/static_storage_path_source.pb.go b/executor/proto/tensorflow/serving/static_storage_path_source.pb.go new file mode 100644 index 0000000000..acc24df02c --- /dev/null +++ b/executor/proto/tensorflow/serving/static_storage_path_source.pb.go @@ -0,0 +1,102 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/sources/storage_path/static_storage_path_source.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Config proto for StaticStoragePathSource. +type StaticStoragePathSourceConfig struct { + // The single servable name, version number and path to supply statically. + ServableName string `protobuf:"bytes,1,opt,name=servable_name,json=servableName,proto3" json:"servable_name,omitempty"` + VersionNum int64 `protobuf:"varint,2,opt,name=version_num,json=versionNum,proto3" json:"version_num,omitempty"` + VersionPath string `protobuf:"bytes,3,opt,name=version_path,json=versionPath,proto3" json:"version_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StaticStoragePathSourceConfig) Reset() { *m = StaticStoragePathSourceConfig{} } +func (m *StaticStoragePathSourceConfig) String() string { return proto.CompactTextString(m) } +func (*StaticStoragePathSourceConfig) ProtoMessage() {} +func (*StaticStoragePathSourceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_0651e15b76c19a4e, []int{0} +} + +func (m *StaticStoragePathSourceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StaticStoragePathSourceConfig.Unmarshal(m, b) +} +func (m *StaticStoragePathSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StaticStoragePathSourceConfig.Marshal(b, m, deterministic) +} +func (m *StaticStoragePathSourceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_StaticStoragePathSourceConfig.Merge(m, src) +} +func (m *StaticStoragePathSourceConfig) XXX_Size() int { + return xxx_messageInfo_StaticStoragePathSourceConfig.Size(m) +} +func (m *StaticStoragePathSourceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_StaticStoragePathSourceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_StaticStoragePathSourceConfig proto.InternalMessageInfo + +func (m *StaticStoragePathSourceConfig) GetServableName() string { + if m != nil { + return 
m.ServableName + } + return "" +} + +func (m *StaticStoragePathSourceConfig) GetVersionNum() int64 { + if m != nil { + return m.VersionNum + } + return 0 +} + +func (m *StaticStoragePathSourceConfig) GetVersionPath() string { + if m != nil { + return m.VersionPath + } + return "" +} + +func init() { + proto.RegisterType((*StaticStoragePathSourceConfig)(nil), "tensorflow.serving.StaticStoragePathSourceConfig") +} + +func init() { + proto.RegisterFile("tensorflow_serving/sources/storage_path/static_storage_path_source.proto", fileDescriptor_0651e15b76c19a4e) +} + +var fileDescriptor_0651e15b76c19a4e = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0xce, 0x82, 0x30, + 0x14, 0x46, 0xd3, 0x9f, 0xe4, 0x4f, 0xac, 0xb8, 0x74, 0x62, 0x31, 0xa2, 0x2e, 0x4c, 0x30, 0xf8, + 0x08, 0x2e, 0x4e, 0xc4, 0xc0, 0x03, 0x34, 0x85, 0x5c, 0xa0, 0x09, 0xed, 0x25, 0x6d, 0xc1, 0x57, + 0xf0, 0xb1, 0x0d, 0x05, 0xa2, 0xeb, 0x97, 0x73, 0xce, 0xbd, 0xf4, 0xe1, 0x40, 0x5b, 0x34, 0x4d, + 0x8f, 0x2f, 0x6e, 0xc1, 0x4c, 0x52, 0xb7, 0x99, 0xc5, 0xd1, 0xd4, 0x60, 0x33, 0xeb, 0xd0, 0x88, + 0x16, 0xf8, 0x20, 0x5c, 0x97, 0x59, 0x27, 0x9c, 0xac, 0xf9, 0xef, 0xc6, 0x17, 0x30, 0x1d, 0x0c, + 0x3a, 0x64, 0xec, 0x5b, 0x4a, 0xd7, 0xd2, 0xe5, 0x4d, 0xe8, 0xb1, 0xf4, 0x62, 0xb9, 0x78, 0x4f, + 0xe1, 0xba, 0xd2, 0x5b, 0x77, 0xd4, 0x8d, 0x6c, 0xd9, 0x95, 0x1e, 0x66, 0x58, 0x54, 0x3d, 0x70, + 0x2d, 0x14, 0x44, 0x24, 0x26, 0xc9, 0xae, 0x08, 0xb7, 0x31, 0x17, 0x0a, 0xd8, 0x89, 0xee, 0x27, + 0x30, 0x56, 0xa2, 0xe6, 0x7a, 0x54, 0xd1, 0x5f, 0x4c, 0x92, 0xa0, 0xa0, 0xeb, 0x94, 0x8f, 0x8a, + 0x9d, 0x69, 0xb8, 0x01, 0xf3, 0x63, 0x51, 0xe0, 0x23, 0x9b, 0x34, 0x1f, 0xad, 0xfe, 0xfd, 0x97, + 0xb7, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xea, 0x2a, 0xca, 0xf1, 0x00, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/serving/status.pb.go b/executor/proto/tensorflow/serving/status.pb.go new file mode 100644 index 0000000000..4c71b89916 --- 
/dev/null +++ b/executor/proto/tensorflow/serving/status.pb.go @@ -0,0 +1,96 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tensorflow_serving/util/status.proto + +package serving + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + core "github.com/tensorflow/tensorflow/tensorflow/go/core/lib/core" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Status that corresponds to Status in +// third_party/tensorflow/core/lib/core/status.h. +type StatusProto struct { + // Error code. + ErrorCode core.Code `protobuf:"varint,1,opt,name=error_code,proto3,enum=tensorflow.error.Code" json:"error_code,omitempty"` + // Error message. Will only be set if an error was encountered. 
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,proto3" json:"error_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusProto) Reset() { *m = StatusProto{} } +func (m *StatusProto) String() string { return proto.CompactTextString(m) } +func (*StatusProto) ProtoMessage() {} +func (*StatusProto) Descriptor() ([]byte, []int) { + return fileDescriptor_720b9481ad3ba6a2, []int{0} +} + +func (m *StatusProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatusProto.Unmarshal(m, b) +} +func (m *StatusProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatusProto.Marshal(b, m, deterministic) +} +func (m *StatusProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusProto.Merge(m, src) +} +func (m *StatusProto) XXX_Size() int { + return xxx_messageInfo_StatusProto.Size(m) +} +func (m *StatusProto) XXX_DiscardUnknown() { + xxx_messageInfo_StatusProto.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusProto proto.InternalMessageInfo + +func (m *StatusProto) GetErrorCode() core.Code { + if m != nil { + return m.ErrorCode + } + return core.Code_OK +} + +func (m *StatusProto) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +func init() { + proto.RegisterType((*StatusProto)(nil), "tensorflow.serving.StatusProto") +} + +func init() { + proto.RegisterFile("tensorflow_serving/util/status.proto", fileDescriptor_720b9481ad3ba6a2) +} + +var fileDescriptor_720b9481ad3ba6a2 = []byte{ + // 164 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x29, 0x49, 0xcd, 0x2b, + 0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0x8f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0xcc, 0x4b, 0xd7, 0x2f, + 0x2d, 0xc9, 0xcc, 0xd1, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x12, 0x42, 0xa8, 0xd2, 0x83, 0xaa, 0x92, 0xd2, 
0x42, 0x88, 0xe9, 0x27, 0xe7, 0x17, 0xa5, + 0xea, 0xe7, 0x64, 0x26, 0x41, 0x18, 0xa9, 0x45, 0x45, 0xf9, 0x45, 0xf1, 0xc9, 0xf9, 0x29, 0xa9, + 0x50, 0xfd, 0x4a, 0xd9, 0x5c, 0xdc, 0xc1, 0x60, 0xf3, 0x02, 0xc0, 0xc6, 0x99, 0x71, 0x71, 0x21, + 0xd4, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x19, 0x89, 0xe9, 0x21, 0xd9, 0x01, 0x96, 0xd5, 0x73, + 0xce, 0x4f, 0x49, 0x0d, 0x42, 0x52, 0x29, 0xa4, 0xc2, 0xc5, 0x0b, 0xe1, 0xe5, 0xa6, 0x16, 0x17, + 0x27, 0xa6, 0xa7, 0x4a, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xa1, 0x0a, 0x3a, 0x31, 0xff, 0x60, + 0x64, 0x4c, 0x62, 0x03, 0x5b, 0x6c, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x6e, 0x2f, 0xb5, + 0xe0, 0x00, 0x00, 0x00, +} diff --git a/executor/proto/tensorflow/stream_executor/dnn.proto b/executor/proto/tensorflow/stream_executor/dnn.proto new file mode 100644 index 0000000000..fb6bda9d31 --- /dev/null +++ b/executor/proto/tensorflow/stream_executor/dnn.proto @@ -0,0 +1,113 @@ +// LINT: LEGACY_NAMES +syntax = "proto3"; + +package stream_executor.dnn; + +// Specifies the data type used by an operation. +enum DataType { + kFloat = 0; + kDouble = 1; + kHalf = 2; + kInt8 = 3; + kInt32 = 4; +} + +// Describes how a convolution input or output layer's data is formatted. +enum DataLayout { + // Naming convention: + // Y <-> row or height + // X <-> column or width + // Batch <-> batch, or N + // Depth <-> feature, or channel + // TODO(timshen): turn them into cuDNN names, e.g. kNCHW. + kYXDepthBatch = 0; + kYXBatchDepth = 1; + kBatchYXDepth = 2; // cuDNN's NHWC layout + kBatchDepthYX = 3; // cuDNN's NCHW layout + kBatchDepthYX4 = 4; // cuDNN's NCHW_VECT_C layout +} + +// Describes how a convolution filter is laid out in the memory. +enum FilterLayout { + // Naming convention: + // Y <-> row or height + // X <-> column or width + // Output <-> output feature, or N + // Input <-> input feature, or N + // TODO(timshen): turn them into cuDNN names, e.g. kNCHW. 
+ kOutputInputYX = 0; // cuDNN's NCHW layout + kOutputYXInput = 1; // cuDNN's NHWC layout + kOutputInputYX4 = 2; // cuDNN's NCHW_VECT_C layout + kInputYXOutput = 3; + kYXInputOutput = 4; +} + +// Describes a kind of non-linearity (threshold-like mathematical function). +enum ActivationMode { + kNone = 0; + kSigmoid = 1; + // Rectified linear activation: f(x) = x < 0 ? 0 : x + kRelu = 2; + // Rectified linear activation; where upper maximum is 6.0. + kRelu6 = 3; + // Rectified linear activation; where upper maximum specified by + // BatchDescriptor::value_max(). + kReluX = 4; + kTanh = 5; + // Like ReluX; but passes all values in the range [-X,X]. + kBandPass = 6; +} + +// Describe the math definition for the conv op. The popular behavior is +// actually called cross-correlation in math, despite the operation is often +// referred as convolution. See cuDNN cudnnConvolutionMode_t. +enum ConvolutionMode { + CROSS_CORRELATION = 0; + CONVOLUTION = 1; +} + +enum ConvolutionKind { + INVALID = 0; + FORWARD = 1; + BACKWARD_FILTER = 2; + BACKWARD_DATA = 3; + FORWARD_BIAS_ACTIVATION = 4; +} + +// Generic tensor representation. +message TensorDescriptorProto { + repeated int64 dimensions = 1; + DataType data_type = 2; + oneof layout_oneof { + DataLayout data_layout = 3; + FilterLayout filter_layout = 4; + } +} + +// Generic algorithm representation. +message AlgorithmProto { + enum MathType { + DEFAULT_MATH = 0; + // The GPU may operate 4x4 matrix FMA. + // See cuDNN's documentation for CUDNN_TENSOR_OP_MATH. + TENSOR_OP_MATH = 1; + } + int64 algo_id = 1; + MathType math_type = 2; +} + +// Convolution-specific parameters. +message ConvolutionDescriptorProto { + repeated int64 paddings = 1; + repeated int64 strides = 2; + repeated int64 dilations = 3; + // The "accumulator" type. For example, use F32 as an accumulator for F16 + // convolutions. + // See cuDNN's cudnnConvolutionMode_t. + DataType compute_mode = 4; + // See cuDNN's group count. 
+ int32 group_count = 5; + ConvolutionMode convolution_mode = 6; + // Tensorflow node name, same as in NodeDef, for debugging purposes. + string name = 7; +} diff --git a/executor/proto/tensorflow/tools/api/lib/api_objects.proto b/executor/proto/tensorflow/tools/api/lib/api_objects.proto new file mode 100644 index 0000000000..7207b9c5a9 --- /dev/null +++ b/executor/proto/tensorflow/tools/api/lib/api_objects.proto @@ -0,0 +1,42 @@ +syntax = "proto2"; + +import "google/protobuf/descriptor.proto"; + +package third_party.tensorflow.tools.api; + +message TFAPIMember { + optional string name = 1; + optional string mtype = 2; +}; + +message TFAPIMethod { + optional string name = 1; + optional string path = 2; + optional string argspec = 3; +}; + +message TFAPIModule { + repeated TFAPIMember member = 1; + repeated TFAPIMethod member_method = 2; +}; + +message TFAPIClass { + repeated string is_instance = 1; + repeated TFAPIMember member = 2; + repeated TFAPIMethod member_method = 3; +}; + +message TFAPIProto { + // Suppress generation of the proto API's descriptor() method lest it + // conflict with the standard accessor for the field having the same name. 
+ option no_standard_descriptor_accessor = true; + + optional google.protobuf.DescriptorProto descriptor = 1; +}; + +message TFAPIObject { + optional string path = 1; + optional TFAPIModule tf_module = 2; + optional TFAPIClass tf_class = 3; + optional TFAPIProto tf_proto = 4; +}; diff --git a/executor/proto/tensorflow/tools/proto_text/test.proto b/executor/proto/tensorflow/tools/proto_text/test.proto new file mode 100644 index 0000000000..298fc0deef --- /dev/null +++ b/executor/proto/tensorflow/tools/proto_text/test.proto @@ -0,0 +1,111 @@ +syntax = "proto3"; + +package tensorflow.test; + +message TestAllTypes { + message NestedMessage { + message DoubleNestedMessage { + string optional_string = 1; + } + + int32 optional_int32 = 1; + repeated int32 repeated_int32 = 2; + DoubleNestedMessage msg = 3; + int64 optional_int64 = 4; + } + + enum NestedEnum { + ZERO = 0; + FOO = 1; + BAR = 2; + BAZ = 3; + NEG = -1; // Intentionally negative. + } + + // Singular + int32 optional_int32 = 1000; // use large tag to test output order. + int64 optional_int64 = 2; + uint32 optional_uint32 = 3; + uint64 optional_uint64 = 999; // use large tag to test output order. 
+ sint32 optional_sint32 = 5; + sint64 optional_sint64 = 6; + fixed32 optional_fixed32 = 7; + fixed64 optional_fixed64 = 8; + sfixed32 optional_sfixed32 = 9; + sfixed64 optional_sfixed64 = 10; + float optional_float = 11; + double optional_double = 12; + bool optional_bool = 13; + string optional_string = 14; + bytes optional_bytes = 15; + + NestedMessage optional_nested_message = 18; + ForeignMessage optional_foreign_message = 19; + + NestedEnum optional_nested_enum = 21; + ForeignEnum optional_foreign_enum = 22; + + string optional_cord = 25; + + // Repeated + repeated int32 repeated_int32 = 31; + repeated int64 repeated_int64 = 32; + repeated uint32 repeated_uint32 = 33; + repeated uint64 repeated_uint64 = 34; + repeated sint32 repeated_sint32 = 35; + repeated sint64 repeated_sint64 = 36; + repeated fixed32 repeated_fixed32 = 37; + repeated fixed64 repeated_fixed64 = 38; + repeated sfixed32 repeated_sfixed32 = 39; + repeated sfixed64 repeated_sfixed64 = 40; + repeated float repeated_float = 41; + repeated double repeated_double = 42; + repeated bool repeated_bool = 43; + repeated string repeated_string = 44; + repeated bytes repeated_bytes = 45; + + repeated NestedMessage repeated_nested_message = 48; + repeated NestedEnum repeated_nested_enum = 51; + + repeated string repeated_cord = 55; + + oneof oneof_field { + uint32 oneof_uint32 = 111; + NestedMessage oneof_nested_message = 112; + string oneof_string = 113; + bytes oneof_bytes = 114; + NestedEnum oneof_enum = 100; + } + + map map_string_to_message = 58; + map map_int32_to_message = 59; + map map_int64_to_message = 60; + map map_bool_to_message = 61; + map map_string_to_int64 = 62; + map map_int64_to_string = 63; + map another_map_string_to_message = 65; + + repeated int64 packed_repeated_int64 = 64 [packed = true]; +} + +// A recursive message. 
+message NestedTestAllTypes { + NestedTestAllTypes child = 1; + TestAllTypes payload = 2; + + map map_string_to_int64 = 3; +} + +message ForeignMessage { + int32 c = 1; +} + +enum ForeignEnum { + FOREIGN_ZERO = 0; + FOREIGN_FOO = 4; + FOREIGN_BAR = 5; + FOREIGN_BAZ = 6; +} + +message TestEmptyMessage { +} diff --git a/executor/proto/tensorflow_serving/apis/BUILD b/executor/proto/tensorflow_serving/apis/BUILD new file mode 100644 index 0000000000..f3eeeaaead --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/BUILD @@ -0,0 +1,407 @@ +# Description: Tensorflow Serving APIs. + +load("//tensorflow_serving:serving.bzl", "serving_proto_library") +load("//tensorflow_serving:serving.bzl", "serving_proto_library_py") +load("//tensorflow_serving:serving.bzl", "serving_go_grpc_library") +load("@org_tensorflow//tensorflow/core/platform:build_config.bzl", "tf_jspb_proto_library", "tf_pyclif_proto_library") + +package( + default_visibility = ["//visibility:public"], + features = ["-layering_check"], +) + +licenses(["notice"]) # Apache 2.0 + +filegroup( + name = "all_files", + srcs = glob( + ["**/*"], + exclude = [ + "**/METADATA", + "**/OWNERS", + ], + ), +) + +serving_proto_library( + name = "get_model_metadata_proto", + srcs = ["get_model_metadata.proto"], + cc_api_version = 2, + deps = [ + ":model_proto", + "@com_google_protobuf//:cc_wkt_protos", + "@org_tensorflow//tensorflow/core:protos_all", + ], +) + +serving_proto_library_py( + name = "get_model_metadata_proto_py_pb2", + srcs = ["get_model_metadata.proto"], + proto_library = "get_model_metadata_proto", + deps = [ + ":model_proto_py_pb2", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +serving_proto_library( + name = "input_proto", + srcs = ["input.proto"], + cc_api_version = 2, + deps = [ + "@org_tensorflow//tensorflow/core:protos_all", + ], +) + +serving_proto_library_py( + name = "input_proto_py_pb2", + srcs = ["input.proto"], + proto_library = "input_proto", + deps = [ + 
"@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +tf_pyclif_proto_library( + name = "input_pyclif", + deprecated_proto_lib_visibility = True, # TODO(b/136847937) + proto_lib = ":input_proto", + proto_srcfile = "input.proto", +) + +tf_jspb_proto_library( + name = "input_jspb_proto", + deps = [":input_proto"], +) + +serving_proto_library( + name = "model_proto", + srcs = ["model.proto"], + cc_api_version = 2, + deps = [ + "@com_google_protobuf//:cc_wkt_protos", + ], +) + +serving_proto_library_py( + name = "model_proto_py_pb2", + srcs = ["model.proto"], + proto_library = "model_proto", + deps = [], +) + +tf_pyclif_proto_library( + name = "model_pyclif", + deprecated_proto_lib_visibility = True, # TODO(b/136847937) + proto_lib = ":model_proto", + proto_srcfile = "model.proto", +) + +tf_jspb_proto_library( + name = "model_jspb_proto", + deps = [":model_proto"], +) + +serving_proto_library( + name = "predict_proto", + srcs = ["predict.proto"], + cc_api_version = 2, + deps = [ + ":model_proto", + "@org_tensorflow//tensorflow/core:protos_all", + ], +) + +serving_proto_library_py( + name = "predict_proto_py_pb2", + srcs = ["predict.proto"], + proto_library = "predict_proto", + deps = [ + ":model_proto_py_pb2", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +tf_jspb_proto_library( + name = "predict_jspb_proto", + deps = [":predict_proto"], +) + +serving_proto_library( + name = "prediction_log_proto", + srcs = ["prediction_log.proto"], + cc_api_version = 2, + deps = [ + ":classification_proto", + ":inference_proto", + ":predict_proto", + ":regression_proto", + ":session_service_proto", + "//tensorflow_serving/core:logging_proto", + ], +) + +serving_proto_library_py( + name = "prediction_log_proto_py_pb2", + srcs = ["prediction_log.proto"], + proto_library = "prediction_log_proto", + deps = [ + ":classification_proto_py_pb2", + ":inference_proto_py_pb2", + ":predict_proto_py_pb2", + ":regression_proto_py_pb2", + ":session_service_proto_py_pb2", + 
"//tensorflow_serving/core:logging_proto_py_pb2", + ], +) + +serving_proto_library( + name = "prediction_service_proto", + srcs = ["prediction_service.proto"], + has_services = 1, + cc_api_version = 2, + cc_grpc_version = 1, + deps = [ + ":classification_proto", + ":get_model_metadata_proto", + ":inference_proto", + ":predict_proto", + ":regression_proto", + ], +) + +py_library( + name = "prediction_service_proto_py_pb2", + srcs = [ + "prediction_service_pb2.py", + "prediction_service_pb2_grpc.py", + ], + srcs_version = "PY2AND3", + deps = [ + ":classification_proto_py_pb2", + ":get_model_metadata_proto_py_pb2", + ":inference_proto_py_pb2", + ":predict_proto_py_pb2", + ":regression_proto_py_pb2", + ], +) + +tf_pyclif_proto_library( + name = "predict_pyclif", + deprecated_proto_lib_visibility = True, # TODO(b/136847937) + proto_lib = ":predict_proto", + proto_srcfile = "predict.proto", +) + +serving_go_grpc_library( + name = "prediction_service_grpc", + srcs = [":prediction_service_proto"], + deps = [":prediction_service_go_proto"], +) + +serving_proto_library( + name = "model_management_proto", + srcs = ["model_management.proto"], + cc_api_version = 2, + deps = [ + "//tensorflow_serving/config:model_server_config_proto", + "//tensorflow_serving/util:status_proto", + ], +) + +serving_proto_library_py( + name = "model_management_proto_py_pb2", + srcs = ["model_management.proto"], + proto_library = "model_management_proto", + deps = [ + "//tensorflow_serving/config:model_server_config_proto_py_pb2", + "//tensorflow_serving/util:status_proto_py_pb2", + ], +) + +serving_proto_library( + name = "get_model_status_proto", + srcs = ["get_model_status.proto"], + cc_api_version = 2, + deps = [ + ":model_proto", + "//tensorflow_serving/util:status_proto", + ], +) + +serving_proto_library_py( + name = "get_model_status_proto_py_pb2", + srcs = ["get_model_status.proto"], + proto_library = "get_model_status_proto", + deps = [ + ":model_proto_py_pb2", + 
"//tensorflow_serving/util:status_proto_py_pb2", + ], +) + +serving_proto_library( + name = "model_service_proto", + srcs = ["model_service.proto"], + has_services = 1, + cc_api_version = 2, + cc_grpc_version = 1, + deps = [ + ":get_model_status_proto", + ":model_management_proto", + ], +) + +py_library( + name = "model_service_proto_py_pb2", + srcs = [ + "model_service_pb2.py", + "model_service_pb2_grpc.py", + ], + srcs_version = "PY2AND3", + deps = [ + ":get_model_status_proto_py_pb2", + ":model_management_proto_py_pb2", + ], +) + +serving_go_grpc_library( + name = "model_service_grpc", + srcs = [":model_service_proto"], + deps = [":model_service_go_proto"], +) + +serving_proto_library( + name = "classification_proto", + srcs = ["classification.proto"], + cc_api_version = 2, + deps = [ + ":input_proto", + ":model_proto", + ], +) + +serving_proto_library_py( + name = "classification_proto_py_pb2", + srcs = ["classification.proto"], + proto_library = "classification_proto", + deps = [ + ":input_proto_py_pb2", + ":model_proto_py_pb2", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +tf_pyclif_proto_library( + name = "classification_pyclif", + deprecated_proto_lib_visibility = True, # TODO(b/136847937) + proto_lib = ":classification_proto", + proto_srcfile = "classification.proto", +) + +tf_jspb_proto_library( + name = "classification_jspb_proto", + deps = [":classification_proto"], +) + +serving_proto_library( + name = "inference_proto", + srcs = ["inference.proto"], + cc_api_version = 2, + deps = [ + ":classification_proto", + ":input_proto", + ":model_proto", + ":regression_proto", + ], +) + +serving_proto_library_py( + name = "inference_proto_py_pb2", + srcs = ["inference.proto"], + proto_library = "inference_proto", + deps = [ + ":classification_proto_py_pb2", + ":input_proto_py_pb2", + ":model_proto_py_pb2", + ":regression_proto_py_pb2", + ], +) + +tf_pyclif_proto_library( + name = "inference_pyclif", + deprecated_proto_lib_visibility = True, # 
TODO(b/136847937) + proto_lib = ":inference_proto", + proto_srcfile = "inference.proto", +) + +serving_proto_library( + name = "regression_proto", + srcs = ["regression.proto"], + cc_api_version = 2, + deps = [ + ":input_proto", + ":model_proto", + ], +) + +serving_proto_library_py( + name = "regression_proto_py_pb2", + srcs = ["regression.proto"], + proto_library = "regression_proto", + deps = [ + ":input_proto_py_pb2", + ":model_proto_py_pb2", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +tf_jspb_proto_library( + name = "regression_jspb_proto", + deps = [":regression_proto"], +) + +serving_proto_library( + name = "session_service_proto", + srcs = ["session_service.proto"], + has_services = 1, + cc_api_version = 2, + deps = [ + ":model_proto", + "@org_tensorflow//tensorflow/core:protos_all", + ], +) + +serving_proto_library_py( + name = "session_service_proto_py_pb2", + srcs = ["session_service.proto"], + proto_library = "session_service_proto", + deps = [ + ":model_proto_py_pb2", + "@org_tensorflow//tensorflow/core:protos_all_py", + ], +) + +tf_pyclif_proto_library( + name = "regression_pyclif", + deprecated_proto_lib_visibility = True, # TODO(b/136847937) + proto_lib = ":regression_proto", + proto_srcfile = "regression.proto", +) + +cc_library( + name = "classifier", + hdrs = ["classifier.h"], + deps = [ + ":classification_cc_proto", + "@org_tensorflow//tensorflow/core:lib", + ], +) + +cc_library( + name = "regressor", + hdrs = ["regressor.h"], + deps = [ + ":regression_cc_proto", + "@org_tensorflow//tensorflow/core:lib", + ], +) diff --git a/executor/proto/tensorflow_serving/apis/classification.proto b/executor/proto/tensorflow_serving/apis/classification.proto new file mode 100644 index 0000000000..16a90b15a8 --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/classification.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +import "tensorflow_serving/apis/input.proto"; +import 
"tensorflow_serving/apis/model.proto"; + +package tensorflow.serving; + +// A single class. +message Class { + // Label or name of the class. + string label = 1; + // Score for this class (e.g., the probability the item belongs to this + // class). As per the proto3 default-value semantics, if the score is missing, + // it should be treated as 0. + float score = 2; +} + +// List of classes for a single item (tensorflow.Example). +message Classifications { + repeated Class classes = 1; +} + +// Contains one result per input example, in the same order as the input in +// ClassificationRequest. +message ClassificationResult { + repeated Classifications classifications = 1; +} + +// RPC Interfaces + +message ClassificationRequest { + // Model Specification. If version is not specified, will use the latest + // (numerical) version. + ModelSpec model_spec = 1; + + // Input data. + tensorflow.serving.Input input = 2; +} + +message ClassificationResponse { + // Effective Model Specification used for classification. + ModelSpec model_spec = 2; + + // Result of the classification. + ClassificationResult result = 1; +} diff --git a/executor/proto/tensorflow_serving/apis/classifier.h b/executor/proto/tensorflow_serving/apis/classifier.h new file mode 100644 index 0000000000..fbd5ef573f --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/classifier.h @@ -0,0 +1,49 @@ +/* Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_SERVING_APIS_CLASSIFIER_H_ +#define TENSORFLOW_SERVING_APIS_CLASSIFIER_H_ + +#include "tensorflow/core/lib/core/status.h" +#include "tensorflow_serving/apis/classification.pb.h" + +namespace tensorflow { +namespace serving { + +/// Model-type agnostic interface for performing classification. +/// +/// Specific implementations will exist for different model types +/// (e.g. TensorFlow SavedModel) that can convert the request into a model +/// specific input and know how to convert the output into a generic +/// ClassificationResult. +class ClassifierInterface { + public: + /// Given a ClassificationRequest, populates the ClassificationResult with the + /// result. + /// + /// @param request Input request specifying the model/signature to query + /// along with the data payload. + /// @param result The output classifications that will get populated. + /// @return A status object indicating success or failure. + virtual Status Classify(const ClassificationRequest& request, + ClassificationResult* result) = 0; + + virtual ~ClassifierInterface() = default; +}; + +} // namespace serving +} // namespace tensorflow + +#endif // TENSORFLOW_SERVING_APIS_CLASSIFIER_H_ diff --git a/executor/proto/tensorflow_serving/apis/get_model_metadata.proto b/executor/proto/tensorflow_serving/apis/get_model_metadata.proto new file mode 100644 index 0000000000..5d765d8e39 --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/get_model_metadata.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +import "google/protobuf/any.proto"; +import "tensorflow/core/protobuf/meta_graph.proto"; +import "tensorflow_serving/apis/model.proto"; + +// Message returned for "signature_def" field. 
+message SignatureDefMap {
+  map<string, SignatureDef> signature_def = 1;
+};
+
+message GetModelMetadataRequest {
+  // Model Specification indicating which model we are querying for metadata.
+  // If version is not specified, will use the latest (numerical) version.
+  ModelSpec model_spec = 1;
+  // Metadata fields to get. Currently supported: "signature_def".
+  repeated string metadata_field = 2;
+}
+
+message GetModelMetadataResponse {
+  // Model Specification indicating which model this metadata belongs to.
+  ModelSpec model_spec = 1;
+  // Map of metadata field name to metadata field. The options for metadata
+  // field name are listed in GetModelMetadataRequest. Currently supported:
+  // "signature_def".
+  map<string, google.protobuf.Any> metadata = 2;
+}
diff --git a/executor/proto/tensorflow_serving/apis/get_model_status.proto b/executor/proto/tensorflow_serving/apis/get_model_status.proto
new file mode 100644
index 0000000000..535eb9acb6
--- /dev/null
+++ b/executor/proto/tensorflow_serving/apis/get_model_status.proto
@@ -0,0 +1,68 @@
+syntax = "proto3";
+
+option cc_enable_arenas = true;
+
+import "tensorflow_serving/apis/model.proto";
+import "tensorflow_serving/util/status.proto";
+
+package tensorflow.serving;
+
+// GetModelStatusRequest contains a ModelSpec indicating the model for which
+// to get status.
+message GetModelStatusRequest {
+  // Model Specification. If version is not specified, information about all
+  // versions of the model will be returned. If a version is specified, the
+  // status of only that version will be returned.
+  ModelSpec model_spec = 1;
+}
+
+// Version number, state, and status for a single version of a model.
+message ModelVersionStatus {
+  // Model version.
+  int64 version = 1;
+
+  // States that map to ManagerState enum in
+  // tensorflow_serving/core/servable_state.h
+  enum State {
+    // Default value.
+    UNKNOWN = 0;
+
+    // The manager is tracking this servable, but has not initiated any action
+    // pertaining to it.
+ START = 10; + + // The manager has decided to load this servable. In particular, checks + // around resource availability and other aspects have passed, and the + // manager is about to invoke the loader's Load() method. + LOADING = 20; + + // The manager has successfully loaded this servable and made it available + // for serving (i.e. GetServableHandle(id) will succeed). To avoid races, + // this state is not reported until *after* the servable is made + // available. + AVAILABLE = 30; + + // The manager has decided to make this servable unavailable, and unload + // it. To avoid races, this state is reported *before* the servable is + // made unavailable. + UNLOADING = 40; + + // This servable has reached the end of its journey in the manager. Either + // it loaded and ultimately unloaded successfully, or it hit an error at + // some point in its lifecycle. + END = 50; + } + + // Model state. + State state = 2; + + // Model status. + StatusProto status = 3; +} + +// Response for ModelStatusRequest on successful run. +message GetModelStatusResponse { + // Version number and status information for applicable model version(s). + repeated ModelVersionStatus model_version_status = 1 + [json_name = "model_version_status"]; +} diff --git a/executor/proto/tensorflow_serving/apis/inference.proto b/executor/proto/tensorflow_serving/apis/inference.proto new file mode 100644 index 0000000000..16e85cee5f --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/inference.proto @@ -0,0 +1,59 @@ +// This file contains messages for various machine learning inferences +// such as regression and classification. +// +// In many applications more than one type of inference is desired for a single +// input. For example, given meteorologic data an application may want to +// perform a classification to determine if we should expect rain, snow or sun +// and also perform a regression to predict the temperature. 
+// Sharing the single input data between two inference tasks can be accomplished +// using MultiInferenceRequest and MultiInferenceResponse. + +syntax = "proto3"; + +option cc_enable_arenas = true; + +import "tensorflow_serving/apis/classification.proto"; +import "tensorflow_serving/apis/input.proto"; +import "tensorflow_serving/apis/model.proto"; +import "tensorflow_serving/apis/regression.proto"; + +package tensorflow.serving; + +// Inference request such as classification, regression, etc... +message InferenceTask { + // Model Specification. If version is not specified, will use the latest + // (numerical) version. + // All ModelSpecs in a MultiInferenceRequest must access the same model name. + ModelSpec model_spec = 1; + + // Signature's method_name. Should be one of the method names defined in + // third_party/tensorflow/python/saved_model/signature_constants.py. + // e.g. "tensorflow/serving/classify". + string method_name = 2; +} + +// Inference result, matches the type of request or is an error. +message InferenceResult { + ModelSpec model_spec = 1; + + oneof result { + ClassificationResult classification_result = 2; + RegressionResult regression_result = 3; + } +} + +// Inference request containing one or more requests. +message MultiInferenceRequest { + // Inference tasks. + repeated InferenceTask tasks = 1; + + // Input data. + Input input = 2; +} + +// Inference request containing one or more responses. +message MultiInferenceResponse { + // List of results; one for each InferenceTask in the request, returned in the + // same order as the request. + repeated InferenceResult results = 1; +} diff --git a/executor/proto/tensorflow_serving/apis/input.proto b/executor/proto/tensorflow_serving/apis/input.proto new file mode 100644 index 0000000000..e47ff43d74 --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/input.proto @@ -0,0 +1,82 @@ +// Input used in serving APIs. Based on the tensorflow.Example family of +// feature representations. 
+ +syntax = "proto3"; + +option cc_enable_arenas = true; + +import "tensorflow/core/example/example.proto"; + +package tensorflow.serving; + +// Specifies one or more fully independent input Examples. +// See examples at: +// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/example.proto +message ExampleList { + repeated tensorflow.Example examples = 1; +} + +// Specifies one or more independent input Examples, with a common context +// Example. +// +// The common use case for context is to cleanly and optimally specify some +// features that are common across multiple examples. +// +// See example below with a search query as the context and multiple restaurants +// to perform some inference on. +// +// context: { +// features: { +// feature: { +// key : "query" +// value: { +// bytes_list: { +// value: [ "pizza" ] +// } +// } +// } +// } +// } +// examples: { +// features: { +// feature: { +// key : "cuisine" +// value: { +// bytes_list: { +// value: [ "Pizzeria" ] +// } +// } +// } +// } +// } +// examples: { +// features: { +// feature: { +// key : "cuisine" +// value: { +// bytes_list: { +// value: [ "Taqueria" ] +// } +// } +// } +// } +// } +// +// Implementations of ExampleListWithContext merge the context Example into each +// of the Examples. Note that feature keys must not be duplicated between the +// Examples and context Example, or the behavior is undefined. 
+// +// See also: +// tensorflow/core/example/example.proto +// https://developers.google.com/protocol-buffers/docs/proto3#maps +message ExampleListWithContext { + repeated tensorflow.Example examples = 1; + tensorflow.Example context = 2; +} + +message Input { + oneof kind { + ExampleList example_list = 1 [lazy = true]; + ExampleListWithContext example_list_with_context = 2 [lazy = true]; + } +} diff --git a/executor/proto/tensorflow_serving/apis/internal/BUILD b/executor/proto/tensorflow_serving/apis/internal/BUILD new file mode 100644 index 0000000000..6416d42000 --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/internal/BUILD @@ -0,0 +1,24 @@ +# Internal implementation details of serving APIs. + +package( + default_visibility = [ + "//tensorflow_serving:internal", + ], + features = ["-layering_check"], +) + +licenses(["notice"]) # Apache 2.0 + +load("//tensorflow_serving:serving.bzl", "serving_proto_library") + +serving_proto_library( + name = "serialized_input_proto", + srcs = ["serialized_input.proto"], + cc_api_version = 2, + visibility = [ + "//tensorflow_serving:internal", + "@org_tensorflow//tensorflow_ranking/google:__pkg__", + ], + deps = [ + ], +) diff --git a/executor/proto/tensorflow_serving/apis/internal/serialized_input.proto b/executor/proto/tensorflow_serving/apis/internal/serialized_input.proto new file mode 100644 index 0000000000..36c026a76c --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/internal/serialized_input.proto @@ -0,0 +1,45 @@ +/* Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Serialized counterparts of the messages in input.proto. These protos enable +// us to keep the original tensorflow.serving.Input's structure but with the +// tensorflow.Examples in their serialized form. When combined with lazy +// parsing, this improves performance by allowing us to skip a redundant +// deserialization/serialization loop. +// +// WARNING: These are internal implementation details and not part of the public +// API. + +syntax = "proto3"; + +option cc_enable_arenas = true; + +package tensorflow.serving.internal; + +message SerializedExampleList { + repeated bytes examples = 1; +} + +message SerializedExampleListWithContext { + repeated bytes examples = 1; + bytes context = 2; +} + +message SerializedInput { + oneof kind { + SerializedExampleList example_list = 1; + SerializedExampleListWithContext example_list_with_context = 2; + } +} diff --git a/executor/proto/tensorflow_serving/apis/model.proto b/executor/proto/tensorflow_serving/apis/model.proto new file mode 100644 index 0000000000..56493f6822 --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/model.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +import "google/protobuf/wrappers.proto"; + +// Metadata for an inference request such as the model name and version. +message ModelSpec { + // Required servable name. + string name = 1; + + // Optional choice of which version of the model to use. + // + // Recommended to be left unset in the common case. Should be specified only + // when there is a strong version consistency requirement. + // + // When left unspecified, the system will serve the best available version. 
+ // This is typically the latest version, though during version transitions, + // notably when serving on a fleet of instances, may be either the previous or + // new version. + oneof version_choice { + // Use this specific version number. + google.protobuf.Int64Value version = 2; + + // Use the version associated with the given label. + string version_label = 4; + } + + // A named signature to evaluate. If unspecified, the default signature will + // be used. + string signature_name = 3; +} diff --git a/executor/proto/tensorflow_serving/apis/model_management.proto b/executor/proto/tensorflow_serving/apis/model_management.proto new file mode 100644 index 0000000000..9140fa19aa --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/model_management.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +import "tensorflow_serving/config/model_server_config.proto"; +import "tensorflow_serving/util/status.proto"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +message ReloadConfigRequest { + ModelServerConfig config = 1; +} + +message ReloadConfigResponse { + StatusProto status = 1; +} diff --git a/executor/proto/tensorflow_serving/apis/model_service.proto b/executor/proto/tensorflow_serving/apis/model_service.proto new file mode 100644 index 0000000000..29a3b07751 --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/model_service.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +import "tensorflow_serving/apis/get_model_status.proto"; +import "tensorflow_serving/apis/model_management.proto"; + +package tensorflow.serving; + +// ModelService provides methods to query and update the state of the server, +// e.g. which models/versions are being served. +service ModelService { + // Gets status of model. If the ModelSpec in the request does not specify + // version, information about all versions of the model will be returned. 
If + // the ModelSpec in the request does specify a version, the status of only + // that version will be returned. + rpc GetModelStatus(GetModelStatusRequest) returns (GetModelStatusResponse); + + // Reloads the set of served models. The new config supersedes the old one, + // so if a model is omitted from the new config it will be unloaded and no + // longer served. + rpc HandleReloadConfigRequest(ReloadConfigRequest) + returns (ReloadConfigResponse); +} diff --git a/executor/proto/tensorflow_serving/apis/predict.proto b/executor/proto/tensorflow_serving/apis/predict.proto new file mode 100644 index 0000000000..0e9ebf39ca --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/predict.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow_serving/apis/model.proto"; + +// PredictRequest specifies which TensorFlow model to run, as well as +// how inputs are mapped to tensors and how outputs are filtered before +// returning to user. +message PredictRequest { + // Model Specification. If version is not specified, will use the latest + // (numerical) version. + ModelSpec model_spec = 1; + + // Input tensors. + // Names of input tensor are alias names. The mapping from aliases to real + // input tensor names is stored in the SavedModel export as a prediction + // SignatureDef under the 'inputs' field. + map inputs = 2; + + // Output filter. + // Names specified are alias names. The mapping from aliases to real output + // tensor names is stored in the SavedModel export as a prediction + // SignatureDef under the 'outputs' field. + // Only tensors specified here will be run/fetched and returned, with the + // exception that when none is specified, all tensors specified in the + // named signature will be run/fetched and returned. + repeated string output_filter = 3; +} + +// Response for PredictRequest on successful run. 
+message PredictResponse {
+  // Effective Model Specification used to process PredictRequest.
+  ModelSpec model_spec = 2;
+
+  // Output tensors.
+  map<string, TensorProto> outputs = 1;
+}
diff --git a/executor/proto/tensorflow_serving/apis/prediction_log.proto b/executor/proto/tensorflow_serving/apis/prediction_log.proto
new file mode 100644
index 0000000000..474d6bb37e
--- /dev/null
+++ b/executor/proto/tensorflow_serving/apis/prediction_log.proto
@@ -0,0 +1,49 @@
+syntax = "proto3";
+
+option cc_enable_arenas = true;
+
+import "tensorflow_serving/apis/classification.proto";
+import "tensorflow_serving/apis/inference.proto";
+import "tensorflow_serving/apis/predict.proto";
+import "tensorflow_serving/apis/regression.proto";
+import "tensorflow_serving/apis/session_service.proto";
+import "tensorflow_serving/core/logging.proto";
+
+package tensorflow.serving;
+
+message ClassifyLog {
+  ClassificationRequest request = 1;
+  ClassificationResponse response = 2;
+}
+
+message RegressLog {
+  RegressionRequest request = 1;
+  RegressionResponse response = 2;
+}
+
+message PredictLog {
+  PredictRequest request = 1;
+  PredictResponse response = 2;
+}
+
+message MultiInferenceLog {
+  MultiInferenceRequest request = 1;
+  MultiInferenceResponse response = 2;
+}
+
+message SessionRunLog {
+  SessionRunRequest request = 1;
+  SessionRunResponse response = 2;
+}
+
+// Logged model inference request.
+message PredictionLog { + LogMetadata log_metadata = 1; + oneof log_type { + ClassifyLog classify_log = 2; + RegressLog regress_log = 3; + PredictLog predict_log = 6; + MultiInferenceLog multi_inference_log = 4; + SessionRunLog session_run_log = 5; + } +} diff --git a/executor/proto/tensorflow_serving/apis/prediction_service.proto b/executor/proto/tensorflow_serving/apis/prediction_service.proto new file mode 100644 index 0000000000..44e655417f --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/prediction_service.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +import "tensorflow_serving/apis/classification.proto"; +import "tensorflow_serving/apis/get_model_metadata.proto"; +import "tensorflow_serving/apis/inference.proto"; +import "tensorflow_serving/apis/predict.proto"; +import "tensorflow_serving/apis/regression.proto"; + +// open source marker; do not remove +// PredictionService provides access to machine-learned models loaded by +// model_servers. +service PredictionService { + // Classify. + rpc Classify(ClassificationRequest) returns (ClassificationResponse); + + // Regress. + rpc Regress(RegressionRequest) returns (RegressionResponse); + + // Predict -- provides access to loaded TensorFlow model. + rpc Predict(PredictRequest) returns (PredictResponse); + + // MultiInference API for multi-headed models. + rpc MultiInference(MultiInferenceRequest) returns (MultiInferenceResponse); + + // GetModelMetadata - provides access to metadata for loaded models. 
+ rpc GetModelMetadata(GetModelMetadataRequest) + returns (GetModelMetadataResponse); +} diff --git a/executor/proto/tensorflow_serving/apis/regression.proto b/executor/proto/tensorflow_serving/apis/regression.proto new file mode 100644 index 0000000000..e0ac300e84 --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/regression.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +import "tensorflow_serving/apis/input.proto"; +import "tensorflow_serving/apis/model.proto"; + +package tensorflow.serving; + +// Regression result for a single item (tensorflow.Example). +message Regression { + float value = 1; +} + +// Contains one result per input example, in the same order as the input in +// RegressionRequest. +message RegressionResult { + repeated Regression regressions = 1; +} + +// RPC interfaces. + +message RegressionRequest { + // Model Specification. If version is not specified, will use the latest + // (numerical) version. + ModelSpec model_spec = 1; + + // Input data. + tensorflow.serving.Input input = 2; +} + +message RegressionResponse { + // Effective Model Specification used for regression. + ModelSpec model_spec = 2; + + RegressionResult result = 1; +} diff --git a/executor/proto/tensorflow_serving/apis/regressor.h b/executor/proto/tensorflow_serving/apis/regressor.h new file mode 100644 index 0000000000..5e345df9c1 --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/regressor.h @@ -0,0 +1,50 @@ + +/* Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_SERVING_APIS_REGRESSOR_H_ +#define TENSORFLOW_SERVING_APIS_REGRESSOR_H_ + +#include "tensorflow/core/lib/core/status.h" +#include "tensorflow_serving/apis/regression.pb.h" + +namespace tensorflow { +namespace serving { + +/// Model agnostic interface for performing regression. +/// +/// Specific implementations will exist for different model types +/// (e.g. TensorFlow SavedModel) that can convert the request into a model +/// specific input and know how to convert the output into a generic +/// RegressionResult. +class RegressorInterface { + public: + /// Given a RegressionRequest, populates the RegressionResult with the + /// result. + /// + /// @param request Input request specifying the model/signature to query + /// along with the data payload. + /// @param result The output regression results that will get populated. + /// @return A status object indicating success or failure. + virtual Status Regress(const RegressionRequest& request, + RegressionResult* result) = 0; + + virtual ~RegressorInterface() = default; +}; + +} // namespace serving +} // namespace tensorflow + +#endif // TENSORFLOW_SERVING_APIS_REGRESSOR_H_ diff --git a/executor/proto/tensorflow_serving/apis/session_service.proto b/executor/proto/tensorflow_serving/apis/session_service.proto new file mode 100644 index 0000000000..cda4bad868 --- /dev/null +++ b/executor/proto/tensorflow_serving/apis/session_service.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +import "tensorflow_serving/apis/model.proto"; +import "tensorflow/core/protobuf/config.proto"; +import "tensorflow/core/protobuf/named_tensor.proto"; + +package tensorflow.serving; + +message SessionRunRequest { + // Model Specification. 
If version is not specified, will use the latest + // (numerical) version. + ModelSpec model_spec = 1; + + // Tensors to be fed in the step. Each feed is a named tensor. + repeated NamedTensorProto feed = 2; + + // Fetches. A list of tensor names. The caller expects a tensor to + // be returned for each fetch[i] (see RunResponse.tensor). The + // order of specified fetches does not change the execution order. + repeated string fetch = 3; + + // Target Nodes. A list of node names. The named nodes will be run + // to but their outputs will not be fetched. + repeated string target = 4; + + // Options for the run call. **Currently ignored.** + RunOptions options = 5; +} + +message SessionRunResponse { + // Effective Model Specification used for session run. + ModelSpec model_spec = 3; + + // NOTE: The order of the returned tensors may or may not match + // the fetch order specified in RunRequest. + repeated NamedTensorProto tensor = 1; + + // Returned metadata if requested in the options. + RunMetadata metadata = 2; +} + +// SessionService defines a service with which a client can interact to execute +// Tensorflow model inference. The SessionService::SessionRun method is similar +// to MasterService::RunStep of Tensorflow, except that all sessions are ready +// to run, and you request a specific model/session with ModelSpec. +service SessionService { + // Runs inference of a given model. + rpc SessionRun(SessionRunRequest) returns (SessionRunResponse); +} diff --git a/executor/proto/tensorflow_serving/config/log_collector_config.proto b/executor/proto/tensorflow_serving/config/log_collector_config.proto new file mode 100644 index 0000000000..4ce01d34a2 --- /dev/null +++ b/executor/proto/tensorflow_serving/config/log_collector_config.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +message LogCollectorConfig { + // Identifies the type of the LogCollector we will use to collect these logs. 
+ string type = 1; + + // The prefix to use for the filenames of the logs. + string filename_prefix = 2; +} diff --git a/executor/proto/tensorflow_serving/config/logging_config.proto b/executor/proto/tensorflow_serving/config/logging_config.proto new file mode 100644 index 0000000000..8ae4d4c9b3 --- /dev/null +++ b/executor/proto/tensorflow_serving/config/logging_config.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +import "tensorflow_serving/config/log_collector_config.proto"; + +message SamplingConfig { + // Requests will be logged uniformly at random with this probability. Valid + // range: [0, 1.0]. + double sampling_rate = 1; +} + +// Configuration for logging query/responses. +message LoggingConfig { + LogCollectorConfig log_collector_config = 1; + SamplingConfig sampling_config = 2; +} diff --git a/executor/proto/tensorflow_serving/config/model_server_config.proto b/executor/proto/tensorflow_serving/config/model_server_config.proto new file mode 100644 index 0000000000..8d3d5543e9 --- /dev/null +++ b/executor/proto/tensorflow_serving/config/model_server_config.proto @@ -0,0 +1,84 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +import "google/protobuf/any.proto"; +import "tensorflow_serving/config/logging_config.proto"; +import "tensorflow_serving/sources/storage_path/file_system_storage_path_source.proto"; + +// The type of model. +// TODO(b/31336131): DEPRECATED. +enum ModelType { + MODEL_TYPE_UNSPECIFIED = 0 [deprecated = true]; + TENSORFLOW = 1 [deprecated = true]; + OTHER = 2 [deprecated = true]; +}; + +// Common configuration for loading a model being served. +message ModelConfig { + // Name of the model. + string name = 1; + + // Base path to the model, excluding the version directory. + // E.g> for a model at /foo/bar/my_model/123, where 123 is the version, the + // base path is /foo/bar/my_model. 
+ // + // (This can be changed once a model is in serving, *if* the underlying data + // remains the same. Otherwise there are no guarantees about whether the old + // or new data will be used for model versions currently loaded.) + string base_path = 2; + + // Type of model. + // TODO(b/31336131): DEPRECATED. Please use 'model_platform' instead. + ModelType model_type = 3 [deprecated = true]; + + // Type of model (e.g. "tensorflow"). + // + // (This cannot be changed once a model is in serving.) + string model_platform = 4; + + reserved 5; + + // Version policy for the model indicating which version(s) of the model to + // load and make available for serving simultaneously. + // The default option is to serve only the latest version of the model. + // + // (This can be changed once a model is in serving.) + FileSystemStoragePathSourceConfig.ServableVersionPolicy model_version_policy = + 7; + + // String labels to associate with versions of the model, allowing inference + // queries to refer to versions by label instead of number. Multiple labels + // can map to the same version, but not vice-versa. + // + // An envisioned use-case for these labels is canarying tentative versions. + // For example, one can assign labels "stable" and "canary" to two specific + // versions. Perhaps initially "stable" is assigned to version 0 and "canary" + // to version 1. Once version 1 passes canary, one can shift the "stable" + // label to refer to version 1 (at that point both labels map to the same + // version -- version 1 -- which is fine). Later once version 2 is ready to + // canary one can move the "canary" label to version 2. And so on. + map version_labels = 8; + + // Configures logging requests and responses, to the model. + // + // (This can be changed once a model is in serving.) + LoggingConfig logging_config = 6; +} + +// Static list of models to be loaded for serving. +message ModelConfigList { + repeated ModelConfig config = 1; +} + +// ModelServer config. 
+message ModelServerConfig { + // ModelServer takes either a static file-based model config list or an Any + // proto representing custom model config that is fetched dynamically at + // runtime (through network RPC, custom service, etc.). + oneof config { + ModelConfigList model_config_list = 1; + google.protobuf.Any custom_model_config = 2; + } +} diff --git a/executor/proto/tensorflow_serving/config/monitoring_config.proto b/executor/proto/tensorflow_serving/config/monitoring_config.proto new file mode 100644 index 0000000000..9da3700de4 --- /dev/null +++ b/executor/proto/tensorflow_serving/config/monitoring_config.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +// Configuration for Prometheus monitoring. +message PrometheusConfig { + // Whether to expose Prometheus metrics. + bool enable = 1; + + // The endpoint to expose Prometheus metrics. + // If not specified, PrometheusExporter::kPrometheusPath value is used. + string path = 2; +} + +// Configuration for monitoring. +message MonitoringConfig { + PrometheusConfig prometheus_config = 1; +} diff --git a/executor/proto/tensorflow_serving/config/platform_config.proto b/executor/proto/tensorflow_serving/config/platform_config.proto new file mode 100644 index 0000000000..4e506b3883 --- /dev/null +++ b/executor/proto/tensorflow_serving/config/platform_config.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +import "google/protobuf/any.proto"; + +// Configuration for a servable platform e.g. tensorflow or other ML systems. +message PlatformConfig { + // The config proto for a SourceAdapter in the StoragePathSourceAdapter + // registry. + google.protobuf.Any source_adapter_config = 1; +}; + +message PlatformConfigMap { + // A map from a platform name to a platform config. The platform name is used + // in ModelConfig.model_platform. 
+ map platform_configs = 1; +}; diff --git a/executor/proto/tensorflow_serving/config/ssl_config.proto b/executor/proto/tensorflow_serving/config/ssl_config.proto new file mode 100644 index 0000000000..0e51cd61a2 --- /dev/null +++ b/executor/proto/tensorflow_serving/config/ssl_config.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package tensorflow.serving; +option cc_enable_arenas = true; + +// Configuration for a secure gRPC channel +message SSLConfig { + // private server key for SSL + string server_key = 1; + // public server certificate + string server_cert = 2; + // custom certificate authority + string custom_ca = 3; + // valid client certificate required ? + bool client_verify = 4; +}; diff --git a/executor/proto/tensorflow_serving/core/logging.proto b/executor/proto/tensorflow_serving/core/logging.proto new file mode 100644 index 0000000000..6298bb4b24 --- /dev/null +++ b/executor/proto/tensorflow_serving/core/logging.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package tensorflow.serving; + +import "tensorflow_serving/apis/model.proto"; +import "tensorflow_serving/config/logging_config.proto"; + +option cc_enable_arenas = true; + +// Metadata logged along with the request logs. +message LogMetadata { + ModelSpec model_spec = 1; + SamplingConfig sampling_config = 2; + // List of tags used to load the relevant MetaGraphDef from SavedModel. + repeated string saved_model_tags = 3; + // TODO(b/33279154): Add more metadata as mentioned in the bug. +} diff --git a/executor/proto/tensorflow_serving/core/test_util/fake_loader_source_adapter.proto b/executor/proto/tensorflow_serving/core/test_util/fake_loader_source_adapter.proto new file mode 100644 index 0000000000..d3d4adb1e9 --- /dev/null +++ b/executor/proto/tensorflow_serving/core/test_util/fake_loader_source_adapter.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package tensorflow.serving.test_util; + +// Config proto for FakeLoaderSourceAdapter. 
+message FakeLoaderSourceAdapterConfig { + // FakeLoaderSourceAdapter's 'suffix' ctor parameter. + string suffix = 1; +} diff --git a/executor/proto/tensorflow_serving/model_servers/test_util/storage_path_error_injecting_source_adapter.proto b/executor/proto/tensorflow_serving/model_servers/test_util/storage_path_error_injecting_source_adapter.proto new file mode 100644 index 0000000000..39fd84f5be --- /dev/null +++ b/executor/proto/tensorflow_serving/model_servers/test_util/storage_path_error_injecting_source_adapter.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package tensorflow.serving.test_util; + +// Config proto for StoragePathErrorInjectingSourceAdapter. +message StoragePathErrorInjectingSourceAdapterConfig { + // The error message the adapter emits. + string error_message = 1; +} diff --git a/executor/proto/tensorflow_serving/resources/resources.proto b/executor/proto/tensorflow_serving/resources/resources.proto new file mode 100644 index 0000000000..125bee6d53 --- /dev/null +++ b/executor/proto/tensorflow_serving/resources/resources.proto @@ -0,0 +1,46 @@ +// Representations for resources used by servables, and available in a system. +// +// Each of the string-typed values are free-form, so that they can be extended +// by third parties. However we strongly recommend using the values defined in +// resource_values.h when possible, for standardization. + +syntax = "proto3"; + +import "google/protobuf/wrappers.proto"; + +package tensorflow.serving; + +// One kind of resource on one device (or type of device). +message Resource { + // The type of device on which the resource resides, e.g. CPU or GPU. + string device = 1; + + // A specific instance of the device of type 'device' to which the resources + // are bound (instances are assumed to be numbered 0, 1, ...). + // + // When representing the resources required by a servable that has yet to be + // loaded, this field is optional. 
If not set, it denotes that the servable's + // resources are not (yet) bound to a specific instance. + google.protobuf.UInt32Value device_instance = 2; + + // The kind of resource on the device (instance), e.g. RAM or compute share. + // + // A given type of resource should have a standard unit that represents the + // smallest useful quantization. We strongly recommend including the unit + // (e.g. bytes or millicores) in this string, as in "ram_bytes". + string kind = 3; +} + +// An allocation of one or more kinds of resources, along with the quantity of +// each. Used to denote the resources that a servable (or collection of +// servables) will use or is currently using. Also used to denote resources +// available to the serving system for loading more servables. +message ResourceAllocation { + // A collection of resources, each with a quantity. Treated as a resource-> + // quantity map, i.e. no resource can repeat and the order is immaterial. + message Entry { + Resource resource = 1; + uint64 quantity = 2; + } + repeated Entry resource_quantities = 1; +} diff --git a/executor/proto/tensorflow_serving/servables/hashmap/hashmap_source_adapter.proto b/executor/proto/tensorflow_serving/servables/hashmap/hashmap_source_adapter.proto new file mode 100644 index 0000000000..33e956e420 --- /dev/null +++ b/executor/proto/tensorflow_serving/servables/hashmap/hashmap_source_adapter.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package tensorflow.serving; + +// Config proto for HashmapSourceAdapter. +message HashmapSourceAdapterConfig { + // The format used by the file containing a serialized hashmap. + enum Format { + // A simple kind of CSV text file of the form: + // key0,value0\n + // key1,value1\n + // ... 
+ SIMPLE_CSV = 0; + } + Format format = 1; +} diff --git a/executor/proto/tensorflow_serving/servables/tensorflow/saved_model_bundle_source_adapter.proto b/executor/proto/tensorflow_serving/servables/tensorflow/saved_model_bundle_source_adapter.proto new file mode 100644 index 0000000000..56cbe7ccb9 --- /dev/null +++ b/executor/proto/tensorflow_serving/servables/tensorflow/saved_model_bundle_source_adapter.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +import "tensorflow_serving/servables/tensorflow/session_bundle_config.proto"; + +package tensorflow.serving; + +// Config proto for SavedModelBundleSourceAdapter. +message SavedModelBundleSourceAdapterConfig { + // A SessionBundleConfig. + // FOR INTERNAL USE ONLY DURING TRANSITION TO SAVED_MODEL. WILL BE DEPRECATED. + // TODO(b/32248363): Replace this field with the "real" field(s). + SessionBundleConfig legacy_config = 1000; +} diff --git a/executor/proto/tensorflow_serving/servables/tensorflow/session_bundle_config.proto b/executor/proto/tensorflow_serving/servables/tensorflow/session_bundle_config.proto new file mode 100644 index 0000000000..16d63d2830 --- /dev/null +++ b/executor/proto/tensorflow_serving/servables/tensorflow/session_bundle_config.proto @@ -0,0 +1,116 @@ +syntax = "proto3"; + +import "google/protobuf/wrappers.proto"; +import "tensorflow/core/protobuf/config.proto"; +import "tensorflow/core/protobuf/named_tensor.proto"; + +package tensorflow.serving; + +// Options related to model-warmup. +message ModelWarmupOptions { + // Number of times a request is iterated during warmup replay. By default 1. + google.protobuf.Int32Value num_request_iterations = 1; +} + +// Configuration parameters for a SessionBundle, with optional batching. +message SessionBundleConfig { + // The TensorFlow runtime to connect to. + // See full documentation in tensorflow/core/public/session_options.h. 
+ // + // For single machine serving, we recommend using the empty string "", which + // will configure the local TensorFlow runtime implementation. This provides + // the best isolation currently available across multiple Session servables. + string session_target = 1; + + // TensorFlow Session configuration options. + // See details at tensorflow/core/protobuf/config.proto. + ConfigProto session_config = 2; + + // If set, each emitted session is wrapped with a layer that schedules Run() + // calls in batches. The batching layer is transparent to the client + // (implements the tensorflow::Session API). + // + // IMPORTANT: With batching enabled, client threads will spend most of their + // time blocked on Session::Run() calls, waiting for enough peer threads to + // also call Session::Run() such that a large batch can be formed. For good + // throughput, we recommend setting the number of client threads equal to + // roughly twice the maximum batch size ('max_batch_size' below). + // + // The batching layer uses a SharedBatchScheduler to coordinate batching + // across multiple session servables emitted by this source adapter. A + // BatchSchedulerRetrier is added on top of each batching session. + BatchingParameters batching_parameters = 3; + + // If set, session run calls use a separate threadpool for restore and init + // ops as part of loading the session-bundle. The value of this field should + // correspond to the index of the tensorflow::ThreadPoolOptionProto defined as + // part of `session_config.session_inter_op_thread_pool`. + google.protobuf.Int32Value session_run_load_threadpool_index = 4; + + // EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION. + // + // Transient memory used while loading a model, which is released once the + // loading phase has completed. (This is on top of the memory used in steady- + // state while the model is in memory after it has finished loading.) 
+ // + // TODO(b/38376838): This is a temporary hack, and it applies to all models. + // Remove it once resource estimates are moved inside SavedModel. + uint64 experimental_transient_ram_bytes_during_load = 5; + + // Set of SavedModel tags identifying the specific meta graph def to be + // loaded. + repeated string saved_model_tags = 6; + + // EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION. + // + // Input tensors to append to every Session::Run() call. + repeated NamedTensorProto experimental_fixed_input_tensors = 778; + + // Enables model warmup. + bool enable_model_warmup = 779; + ModelWarmupOptions model_warmup_options = 780; +} + +// Batching parameters. Each individual parameter is optional. If omitted, the +// default value from the relevant batching config struct (SharedBatchScheduler +// ::Options or BatchSchedulerRetrier::Options) is used. +message BatchingParameters { + // SharedBatchScheduler options (see shared_batch_scheduler.h): + // + + // The maximum size of each batch. + // + // IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to + // achieve high throughput with batching. + google.protobuf.Int64Value max_batch_size = 1; + + // If a task has been enqueued for this amount of time (in microseconds), and + // a thread is available, the scheduler will immediately form a batch from + // enqueued tasks and assign the batch to the thread for processing, even if + // the batch's size is below 'max_batch_size'. + google.protobuf.Int64Value batch_timeout_micros = 2; + + // The maximum length of the queue, in terms of the number of batches. (A + // batch that has been scheduled on a thread is considered to have been + // removed from the queue.) + google.protobuf.Int64Value max_enqueued_batches = 3; + + // The number of threads to use to process batches. + // Must be >= 1, and should be tuned carefully. + google.protobuf.Int64Value num_batch_threads = 4; + + // The name to use for the pool of batch threads. 
+ google.protobuf.StringValue thread_pool_name = 5; + + // BatchingSession options (see batching_session.h): + // + + // The allowed batch sizes. (Ignored if left empty.) + // Requirements: + // - The entries must be in increasing order. + // - The final entry must equal 'max_batch_size'. + repeated int64 allowed_batch_sizes = 6; + + // Whether to pad variable-length inputs when a batch is formed. + bool pad_variable_length_inputs = 7; +} diff --git a/executor/proto/tensorflow_serving/servables/tensorflow/session_bundle_source_adapter.proto b/executor/proto/tensorflow_serving/servables/tensorflow/session_bundle_source_adapter.proto new file mode 100644 index 0000000000..d1fb90902e --- /dev/null +++ b/executor/proto/tensorflow_serving/servables/tensorflow/session_bundle_source_adapter.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +import "tensorflow_serving/servables/tensorflow/session_bundle_config.proto"; + +package tensorflow.serving; + +// Config proto for SessionBundleSourceAdapter. +message SessionBundleSourceAdapterConfig { + SessionBundleConfig config = 1; +} diff --git a/executor/proto/tensorflow_serving/sources/storage_path/file_system_storage_path_source.proto b/executor/proto/tensorflow_serving/sources/storage_path/file_system_storage_path_source.proto new file mode 100644 index 0000000000..add7aa2a28 --- /dev/null +++ b/executor/proto/tensorflow_serving/sources/storage_path/file_system_storage_path_source.proto @@ -0,0 +1,88 @@ +syntax = "proto3"; + +package tensorflow.serving; + +// Config proto for FileSystemStoragePathSource. +message FileSystemStoragePathSourceConfig { + // A policy that dictates which version(s) of a servable should be served. + message ServableVersionPolicy { + // Serve the latest versions (i.e. the ones with the highest version + // numbers), among those found on disk. + // + // This is the default policy, with the default number of versions as 1. + message Latest { + // Number of latest versions to serve. (The default is 1.) 
+ uint32 num_versions = 1; + } + + // Serve all versions found on disk. + message All { + } + + // Serve a specific version (or set of versions). + // + // This policy is useful for rolling back to a specific version, or for + // canarying a specific version while still serving a separate stable + // version. + message Specific { + // The version numbers to serve. + repeated int64 versions = 1; + } + + oneof policy_choice { + Latest latest = 100; + All all = 101; + Specific specific = 102; + } + } + + // A servable name and base path to look for versions of the servable. + message ServableToMonitor { + // The servable name to supply in aspired-versions callback calls. Child + // paths of 'base_path' are considered to be versions of this servable. + string servable_name = 1; + + // The path to monitor, i.e. look for child paths of the form base_path/123. + string base_path = 2; + + // The policy to determines the number of versions of the servable to be + // served at the same time. + tensorflow.serving.FileSystemStoragePathSourceConfig.ServableVersionPolicy + servable_version_policy = 4; + + reserved 3; // Legacy version_policy definition. + } + + // The servables to monitor for new versions, and aspire. + repeated ServableToMonitor servables = 5; + + // A single servable name/base_path pair to monitor. + // DEPRECATED: Use 'servables' instead. + // TODO(b/30898016): Stop using these fields, and ultimately remove them here. + string servable_name = 1 [deprecated = true]; + string base_path = 2 [deprecated = true]; + + // How long to wait between file-system polling to look for children of + // 'base_path', in seconds. + // + // If set to zero, filesystem will be polled exactly once. If set to a + // negative value (for testing use only), polling will be entirely disabled. 
+ int64 file_system_poll_wait_seconds = 3; + + // If true, then FileSystemStoragePathSource::Create() and ::UpdateConfig() + // fail if, for any configured servables, the file system doesn't currently + // contain at least one version under the base path. + // (Otherwise, it will emit a warning and keep pinging the file system to + // check for a version to appear later.) + // DEPRECATED: Use 'servable_versions_always_present' instead, which includes + // this behavior. + // TODO(b/30898016): Remove 2019-10-31 or later. + bool fail_if_zero_versions_at_startup = 4 [deprecated = true]; + + // If true, the servable is always expected to exist on the underlying + // filesystem. FileSystemStoragePathSource::Create() and ::UpdateConfig() will + // fail if, for any configured servables, the file system doesn't currently + // contain at least one version under the base path. In addition, if a polling + // loop find the base path empty, it will not unload existing servables. + bool servable_versions_always_present = 6; +} diff --git a/executor/proto/tensorflow_serving/sources/storage_path/static_storage_path_source.proto b/executor/proto/tensorflow_serving/sources/storage_path/static_storage_path_source.proto new file mode 100644 index 0000000000..a7bca942e2 --- /dev/null +++ b/executor/proto/tensorflow_serving/sources/storage_path/static_storage_path_source.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package tensorflow.serving; + +// Config proto for StaticStoragePathSource. +message StaticStoragePathSourceConfig { + // The single servable name, version number and path to supply statically. 
+ string servable_name = 1; + int64 version_num = 2; + string version_path = 3; +} diff --git a/executor/proto/tensorflow_serving/util/class_registration_test.proto b/executor/proto/tensorflow_serving/util/class_registration_test.proto new file mode 100644 index 0000000000..ff542109a7 --- /dev/null +++ b/executor/proto/tensorflow_serving/util/class_registration_test.proto @@ -0,0 +1,19 @@ +// Proto messages used by class_registration_test.cc. + +syntax = "proto3"; + +import "google/protobuf/any.proto"; + +package tensorflow.serving; + +message Config1 { + string string_field = 1; +} + +message Config2 { + string string_field = 1; +} + +message MessageWithAny { + google.protobuf.Any any_field = 1; +} diff --git a/executor/proto/tensorflow_serving/util/status.proto b/executor/proto/tensorflow_serving/util/status.proto new file mode 100644 index 0000000000..8588c78022 --- /dev/null +++ b/executor/proto/tensorflow_serving/util/status.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +option cc_enable_arenas = true; + +import "tensorflow/core/lib/core/error_codes.proto"; + +package tensorflow.serving; + +// Status that corresponds to Status in +// third_party/tensorflow/core/lib/core/status.h. +message StatusProto { + // Error code. + error.Code error_code = 1 [json_name = "error_code"]; + + // Error message. Will only be set if an error was encountered. + string error_message = 2 [json_name = "error_message"]; +} diff --git a/executor/protoc.go b/executor/protoc.go new file mode 100644 index 0000000000..5d00f611cf --- /dev/null +++ b/executor/protoc.go @@ -0,0 +1,126 @@ +//+build ignore + +// TensorFlow Serving gRPC interface generator. +// +// This script works around a bunch of issues (as of 2019-08-25) between Go's +// protobuf compiler plugin, Go modules, and definitions of TensorFlow and +// TensorFlow Serving proto files. It assumes that protoc and protoc-gen-go are +// on your PATH. 
+// +// git clone -b r1.15 https://github.com/tensorflow/tensorflow.git +// git clone -b r1.14 https://github.com/tensorflow/serving.git +// go run protoc.go +// go mod edit -replace=github.com/tensorflow/tensorflow/tensorflow/go/core=./proto/tensorflow/core +// cd proto/tensorflow/core && go mod init github.com/tensorflow/tensorflow/tensorflow/go/core && cd - +// go build ./proto/tensorflow/serving +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" +) + +const protoDir = "proto/" + +var opts = []string{"-Iserving", "-Itensorflow"} +var cmds = []ProtocCmd{{ + PkgDir: protoDir + "tensorflow/core/example", + Inputs: []string{"tensorflow/tensorflow/core/example/*.proto"}, +}, { + PkgDir: protoDir + "tensorflow/core/framework", + Inputs: []string{"tensorflow/tensorflow/core/framework/*.proto"}, +}, { + PkgDir: protoDir + "tensorflow/core/lib/core", + Inputs: []string{"tensorflow/tensorflow/core/lib/core/*.proto"}, +}, { + GoOpts: "import_path=protobuf", + PkgDir: protoDir + "tensorflow/core/protobuf", + Inputs: []string{ + "tensorflow/tensorflow/core/protobuf/*.proto", + "tensorflow/tensorflow/stream_executor/*.proto", + }, +}, { + GoOpts: "plugins=grpc,import_path=serving", + PkgDir: protoDir + "tensorflow/serving", + Inputs: []string{ + "serving/tensorflow_serving/apis/*.proto", + "serving/tensorflow_serving/config/*.proto", + "serving/tensorflow_serving/core/*.proto", + "serving/tensorflow_serving/sources/storage_path/*.proto", + "serving/tensorflow_serving/util/*.proto", + }, +}} + +func main() { + for _, cmd := range cmds { + fmt.Fprintln(os.Stderr, "==>", cmd.PkgDir) + if err := cmd.Run(); err != nil { + if e, ok := err.(*exec.ExitError); ok { + os.Exit(e.ExitCode()) + } + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } +} + +// ProtocCmd executes protoc to generate sources for a single Go package. 
+type ProtocCmd struct { + GoOpts string // --go_out options + PkgDir string // Final output directory + Inputs []string // Input files or glob patterns +} + +func (pc *ProtocCmd) Run() error { + // Use a temporary protoc output directory + root := filepath.Dir(pc.PkgDir) + os.MkdirAll(root, 0777) + tmp, err := ioutil.TempDir(root, filepath.Base(pc.PkgDir)+".") + if err != nil { + return err + } + defer os.RemoveAll(tmp) + + // Run protoc + cmd := exec.Command("protoc", opts...) + cmd.Args = append(cmd.Args, "--go_out="+pc.GoOpts+":"+tmp) + for _, in := range pc.Inputs { + files, err := filepath.Glob(in) + if err != nil { + return err + } + cmd.Args = append(cmd.Args, files...) + } + cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr + err = cmd.Run() + + // Move generated files to PkgDir + os.RemoveAll(pc.PkgDir) + if err := os.MkdirAll(pc.PkgDir, 0777); err != nil { + return err + } + walkErr := filepath.Walk(tmp, func(path string, fi os.FileInfo, err error) error { + if err == nil && fi.Mode().IsRegular() { + err = os.Rename(path, filepath.Join(pc.PkgDir, fi.Name())) + } + return err + }) + if err == nil { + err = walkErr + } + + //for _, in := range pc.Inputs { + // files, err := filepath.Glob(in) + // if err != nil { + // return err + // } + // for _, path := range files { + // err = os.Rename(path, filepath.Join(pc.PkgDir, filepath.Base(path))) + // } + //} + + return err +} diff --git a/executor/samples/local/logger/Makefile b/executor/samples/local/logger/Makefile new file mode 100644 index 0000000000..91ffce9c3a --- /dev/null +++ b/executor/samples/local/logger/Makefile @@ -0,0 +1,31 @@ +BASE=../../.. 
+ +## REST + +run_rest_executor: + ${BASE}/executor --sdep seldon-model --namespace default --predictor example --file ./model_rest.yaml --http_port 8000 + + +run_dummy_rest_model: + cd ${BASE}/../examples/models/mean_classifier && make run_rest_local + +run_dummy_logsink: + docker run -it -p 2222:80 --rm -t mendhak/http-https-echo + +curl_rest: + curl -v localhost:8000/api/v0.1/predictions -d '{"data":{"ndarray":[[1.0,2.0]]}}' + + + +## GRPC + +run_grpc_executor: + ${BASE}/executor --sdep seldon-model --namespace default --predictor example --file ./model_grpc.yaml --http_port 8000 --grpc_port 5000 --transport grpc + +run_dummy_grpc_model: + cd ${BASE}/../examples/models/mean_classifier && make run_grpc_local + +grpc_test: + cd ${BASE}/proto && grpcurl -d '{"data":{"ndarray":[[1.0,2.0]]}}' -plaintext -proto ./prediction.proto 0.0.0.0:5000 seldon.protos.Seldon/Predict + + diff --git a/executor/samples/local/logger/README.md b/executor/samples/local/logger/README.md new file mode 100644 index 0000000000..15ec583a18 --- /dev/null +++ b/executor/samples/local/logger/README.md @@ -0,0 +1,114 @@ +# Test Executor with Logger + +## REST + +Run the following commands in different terminals. + +Start dummy log sink. + +```bash +make run_dummy_logsink +``` + +Start the executor locally. +```bash +make run_rest_executor +``` + +Start a dummy REST model locally. 
+```bash +make run_dummy_rest_model +``` + +Send a request +```bash +make curl_rest +``` + +The log sink should show the request payload as a Cloud Event: + +``` +{ path: '/', + headers: + { host: 'localhost:2222', + 'user-agent': 'Go-http-client/1.1', + 'content-length': '32', + 'ce-id': 'a25fcefe-238f-4a3e-972a-fe64419ca74f', + 'ce-source': 'http://localhost:8000/', + 'ce-specversion': '0.2', + 'ce-time': '2019-12-24T17:55:29.709146122Z', + 'ce-type': 'io.seldon.serving.inference.request', + 'content-type': 'application/json', + 'model-id': 'classifier', + 'accept-encoding': 'gzip' }, + method: 'POST', + body: '{"data":{"ndarray":[[1.0,2.0]]}}', + cookies: undefined, + fresh: false, + hostname: 'localhost', + ip: '::ffff:172.17.0.1', + ips: [], + protocol: 'http', + query: {}, + subdomains: [], + xhr: false, + os: { hostname: '9865dd6ba322' } } + +``` + + +## gRPC + +Run the following commands in different terminals. + +Start dummy log sink. + +```bash +make run_dummy_logsink +``` + +Start the executor locally. +```bash +make run_grpc_executor +``` + +Start a dummy REST model locally. 
+```bash +make run_dummy_grpc_model +``` + +Send a request +```bash +make grpc_test +``` + +The log sink should show the request payload as a Cloud Event: + +``` +{ path: '/', + headers: + { host: 'localhost:2222', + 'user-agent': 'Go-http-client/1.1', + 'content-length': '30', + 'ce-id': '495807bc-a8b8-4068-8fb8-90485deffd66', + 'ce-source': 'http://localhost:8000/', + 'ce-specversion': '1.0', + 'ce-time': '2020-01-06T17:34:02.291670652Z', + 'ce-type': 'io.seldon.serving.inference.request', + 'content-type': 'application/protobuf', + 'model-id': 'classifier', + 'accept-encoding': 'gzip' }, + method: 'POST', + body: '\u001a\u001c\u001a\u001a\n\u00182\u0016\n\t\u0011\u0000\u0000\u0000\u0000\u0000\u0000๏ฟฝ?\n\t\u0011\u0000\u0000\u0000\u0000\u0000\u0000\u0000@', + cookies: undefined, + fresh: false, + hostname: 'localhost', + ip: '::ffff:172.17.0.1', + ips: [], + protocol: 'http', + query: {}, + subdomains: [], + xhr: false, + os: { hostname: 'ea9691dcda8b' } } +``` + diff --git a/executor/samples/local/logger/model_grpc.yaml b/executor/samples/local/logger/model_grpc.yaml new file mode 100644 index 0000000000..bd2b11e2c3 --- /dev/null +++ b/executor/samples/local/logger/model_grpc.yaml @@ -0,0 +1,31 @@ +apiVersion: machinelearning.seldon.io/v1alpha2 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier_rest:1.3 + name: classifier + graph: + children: [] + endpoint: + type: GRPC + service_host: 0.0.0.0 + service_port: 9000 + name: classifier + type: MODEL + logger: + mode: request + url: http://localhost:2222 + labels: + version: v1 + name: example + replicas: 1 diff --git a/executor/samples/local/logger/model_rest.yaml b/executor/samples/local/logger/model_rest.yaml new file mode 100644 index 0000000000..3c61e616a6 --- /dev/null +++ 
b/executor/samples/local/logger/model_rest.yaml @@ -0,0 +1,31 @@ +apiVersion: machinelearning.seldon.io/v1alpha2 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier_rest:1.3 + name: classifier + graph: + children: [] + endpoint: + type: REST + service_host: 0.0.0.0 + service_port: 9000 + name: classifier + type: MODEL + logger: + mode: all + url: http://localhost:2222 + labels: + version: v1 + name: example + replicas: 1 diff --git a/executor/samples/local/metrics/Makefile b/executor/samples/local/metrics/Makefile new file mode 100644 index 0000000000..c9678ac241 --- /dev/null +++ b/executor/samples/local/metrics/Makefile @@ -0,0 +1,27 @@ +BASE=../../.. + +## REST + +run_rest_executor: + ${BASE}/executor --sdep seldon-model --namespace default --predictor example --file ./model_rest.yaml --http_port 8000 + + +run_dummy_rest_model: + cd ${BASE}/../examples/models/mean_classifier && make run_rest_local + +curl_rest: + curl -v localhost:8000/api/v0.1/predictions -d '{"data":{"ndarray":[[1.0,2.0]]}}' + + +## GRPC + +run_grpc_executor: + ${BASE}/executor --sdep seldon-model --namespace default --predictor example --file ./model_grpc.yaml --http_port 8000 --grpc_port 5000 --transport grpc + +run_dummy_grpc_model: + cd ${BASE}/../examples/models/mean_classifier && make run_grpc_local + +grpc_test: + cd ${BASE}/proto && grpcurl -d '{"data":{"ndarray":[[1.0,2.0]]}}' -plaintext -proto ./prediction.proto 0.0.0.0:5000 seldon.protos.Seldon/Predict + + diff --git a/executor/samples/local/metrics/README.md b/executor/samples/local/metrics/README.md new file mode 100644 index 0000000000..e2f384af0c --- /dev/null +++ b/executor/samples/local/metrics/README.md @@ -0,0 +1,124 @@ +# Test Executor with Prometheus Metrics + +## REST + +Run the following commands in different terminals. 
+ +Start the executor locally. +```bash +make run_rest_executor +``` + +Start a dummy REST model locally. +```bash +make run_dummy_rest_model +``` + +Send a request +```bash +make curl_rest +``` + +Check the metrics endpoint: + +``` +curl localhost:8000/metrics +``` + +You should see metrics including: + +``` +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="0.005"} 0 +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="0.01"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="0.025"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="0.05"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="0.1"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="0.25"} 1 
+seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="0.5"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="1"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="2.5"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="5"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="10"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict",le="+Inf"} 1 +seldon_api_executor_client_requests_seconds_sum{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict"} 0.006920656 
+seldon_api_executor_client_requests_seconds_count{code="200",deployment_name="seldon-model",method="post",model_image="seldonio/mock_classifier_rest",model_name="classifier",model_version="1.3",predictor_name="example",predictor_version="",service="/predict"} 1 +# HELP seldon_api_executor_server_requests_seconds A histogram of latencies for executor server +# TYPE seldon_api_executor_server_requests_seconds histogram +seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="0.005"} 0 +seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="0.01"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="0.025"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="0.05"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="0.1"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="0.25"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="0.5"} 1 
+seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="1"} 1
+seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="2.5"} 1
+seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="5"} 1
+seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="10"} 1
+seldon_api_executor_server_requests_seconds_bucket{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions",le="+Inf"} 1
+seldon_api_executor_server_requests_seconds_sum{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions"} 0.007476718
+seldon_api_executor_server_requests_seconds_count{code="200",deployment_name="seldon-model",method="post",predictor_name="example",predictor_version="",service="/api/v0.1/predictions"} 1
+```
+
+
+## gRPC
+
+Run the following commands in different terminals.
+
+
+Start the executor locally.
+```bash
+make run_grpc_executor
+```
+
+Start a dummy gRPC model locally. 
+```bash +make run_dummy_grpc_model +``` + +Send a request +```bash +make grpc_test +``` + +Check the metrics endpoint: + +``` +curl localhost:8000/metrics +``` + +You should see metrics including: + +``` +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="0.005"} 0 +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="0.01"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="0.025"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="0.05"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="0.1"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="0.25"} 1 
+seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="0.5"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="1"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="2.5"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="5"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="10"} 1 +seldon_api_executor_client_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier",le="+Inf"} 1 
+seldon_api_executor_client_requests_seconds_sum{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier"} 0.005590603 +seldon_api_executor_client_requests_seconds_count{code="OK",deployment_name="seldon-model",method="unary",model_image="1.3",model_name="seldonio/mock_classifier_rest",model_version="/seldon.protos.Model/Predict",predictor_name="example",predictor_version="",service="classifier"} 1 +# HELP seldon_api_executor_server_requests_seconds A histogram of latencies for executor server +# TYPE seldon_api_executor_server_requests_seconds histogram +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="0.005"} 0 +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="0.01"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="0.025"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="0.05"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="0.1"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="0.25"} 1 
+seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="0.5"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="1"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="2.5"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="5"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="10"} 1 +seldon_api_executor_server_requests_seconds_bucket{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict",le="+Inf"} 1 +seldon_api_executor_server_requests_seconds_sum{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict"} 0.005860215 +seldon_api_executor_server_requests_seconds_count{code="OK",deployment_name="seldon-model",method="unary",predictor_name="example",predictor_version="",service="/seldon.protos.Seldon/Predict"} 1 + +``` \ No newline at end of file diff --git a/executor/samples/local/metrics/model_grpc.yaml b/executor/samples/local/metrics/model_grpc.yaml new file mode 100644 index 0000000000..265f0e78e4 --- /dev/null +++ b/executor/samples/local/metrics/model_grpc.yaml @@ -0,0 +1,28 @@ +apiVersion: machinelearning.seldon.io/v1alpha2 +kind: SeldonDeployment +metadata: + 
labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier_rest:1.3 + name: classifier + graph: + children: [] + endpoint: + type: GRPC + service_host: 0.0.0.0 + service_port: 9000 + name: classifier + type: MODEL + labels: + version: v1 + name: example + replicas: 1 diff --git a/executor/samples/local/metrics/model_rest.yaml b/executor/samples/local/metrics/model_rest.yaml new file mode 100644 index 0000000000..0bece0ef37 --- /dev/null +++ b/executor/samples/local/metrics/model_rest.yaml @@ -0,0 +1,28 @@ +apiVersion: machinelearning.seldon.io/v1alpha2 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier_rest:1.3 + name: classifier + graph: + children: [] + endpoint: + type: REST + service_host: 0.0.0.0 + service_port: 9000 + name: classifier + type: MODEL + labels: + version: v1 + name: example + replicas: 1 diff --git a/executor/samples/local/tfserving/Makefile b/executor/samples/local/tfserving/Makefile new file mode 100644 index 0000000000..abed50c7fa --- /dev/null +++ b/executor/samples/local/tfserving/Makefile @@ -0,0 +1,43 @@ +BASE=../../.. 
+ +## REST + +run_rest_executor: + ${BASE}/executor --sdep seldon-model --namespace default --predictor example --file ./model_rest.yaml --http_port 8000 --protocol tensorflow + +run_rest_executor_chain: + ${BASE}/executor --sdep seldon-model --namespace default --predictor example --file ./model_rest_chain.yaml --http_port 8000 --protocol tensorflow + + +run_tensorflow_serving: + docker run --name tfserver --rm -p 8501:8501 -p 8500:8500 -v "${PWD}/model:/models/half_plus_two" -e MODEL_NAME=half_plus_two tensorflow/serving + +curl_rest: + curl -d '{"instances": [1.0, 2.0, 5.0]}' -X POST http://localhost:8000/v1/models/half_plus_two/:predict + +curl_status: + curl http://localhost:8000/v1/models/half_plus_two + +curl_metadata: + curl http://localhost:8000/v1/models/half_plus_two/metadata + + +## GRPC + +run_grpc_executor: + ${BASE}/executor --sdep seldon-model --namespace default --predictor example --file ./model_grpc.yaml --http_port 8000 --grpc_port 5000 --transport grpc --protocol tensorflow + +run_grpc_executor_chain: + ${BASE}/executor --sdep seldon-model --namespace default --predictor example --file ./model_grpc_chain.yaml --http_port 8000 --grpc_port 5000 --transport grpc --protocol tensorflow + + +grpc_test: + cd ${BASE}/proto && grpcurl -d '{"model_spec":{"name":"half_plus_two"},"inputs":{"x":{"dtype": 1, "tensor_shape": {"dim":[{"size": 3}]}, "floatVal" : [1.0, 2.0, 3.0]}}}' -plaintext -proto ./prediction_service.proto 0.0.0.0:5000 tensorflow.serving.PredictionService/Predict + +grpc_status: + cd ${BASE}/proto && grpcurl -d '{"model_spec":{"name":"half_plus_two"}}' -plaintext -proto ./model_service.proto 0.0.0.0:5000 tensorflow.serving.ModelService/GetModelStatus + +grpc_metadata: + cd ${BASE}/proto && grpcurl -d '{"model_spec":{"name":"half_plus_two"},"metadata_field":"signature_def"}' -plaintext -proto ./prediction_service.proto 0.0.0.0:5000 tensorflow.serving.PredictionService/GetModelMetadata + + diff --git 
a/executor/samples/local/tfserving/README.md b/executor/samples/local/tfserving/README.md new file mode 100644 index 0000000000..55cca2e4f0 --- /dev/null +++ b/executor/samples/local/tfserving/README.md @@ -0,0 +1,729 @@ +# Test Executor with Tensorflow Serving + +You will need: + + * docker + * curl + * [grpcurl](https://github.com/fullstorydev/grpcurl) + * a built executor + +Clone the tensorflow repo: + +```bash +make serving +``` + + +## REST - single model + +Run the following commands in different terminals. + +Start tensorflow serving model +```bash +make run_tensorflow_serving +``` + +Start the executor locally. +```bash +make run_rest_executor +``` + +This will run the model shown below: + +```JSON +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: tensorflow/serving:latest + name: half_plus_two + graph: + children: [] + endpoint: + type: REST + service_host: 0.0.0.0 + service_port: 8501 + name: half_plus_two + type: MODEL + labels: + version: v1 + name: example + replicas: 1 + +``` + +Check status of model +```bash +make curl_status +``` + +You should see a response: +``` +{ + "model_version_status": [ + { + "version": "123", + "state": "AVAILABLE", + "status": { + "error_code": "OK", + "error_message": "" + } + } + ] +} +``` + +Check model metadata +```bash +make curl_metadata +``` + +You should see a response like: +``` +{ +"model_spec":{ + "name": "half_plus_two", + "signature_name": "", + "version": "123" +} +, +"metadata": {"signature_def": { + "signature_def": { + "regress_x_to_y2": { + "inputs": { + "inputs": { + "dtype": "DT_STRING", + "tensor_shape": { + "dim": [], + "unknown_rank": true + }, + "name": "tf_example:0" + } + }, + "outputs": { + "outputs": { + "dtype": "DT_FLOAT", + "tensor_shape": { + "dim": [ + { + "size": "-1", + 
"name": "" + }, + { + "size": "1", + "name": "" + } + ], + "unknown_rank": false + }, + "name": "y2:0" + } + }, + "method_name": "tensorflow/serving/regress" + }, + "classify_x_to_y": { + "inputs": { + "inputs": { + "dtype": "DT_STRING", + "tensor_shape": { + "dim": [], + "unknown_rank": true + }, + "name": "tf_example:0" + } + }, + "outputs": { + "scores": { + "dtype": "DT_FLOAT", + "tensor_shape": { + "dim": [ + { + "size": "-1", + "name": "" + }, + { + "size": "1", + "name": "" + } + ], + "unknown_rank": false + }, + "name": "y:0" + } + }, + "method_name": "tensorflow/serving/classify" + }, + "regress_x2_to_y3": { + "inputs": { + "inputs": { + "dtype": "DT_FLOAT", + "tensor_shape": { + "dim": [ + { + "size": "-1", + "name": "" + }, + { + "size": "1", + "name": "" + } + ], + "unknown_rank": false + }, + "name": "x2:0" + } + }, + "outputs": { + "outputs": { + "dtype": "DT_FLOAT", + "tensor_shape": { + "dim": [ + { + "size": "-1", + "name": "" + }, + { + "size": "1", + "name": "" + } + ], + "unknown_rank": false + }, + "name": "y3:0" + } + }, + "method_name": "tensorflow/serving/regress" + }, + "serving_default": { + "inputs": { + "x": { + "dtype": "DT_FLOAT", + "tensor_shape": { + "dim": [ + { + "size": "-1", + "name": "" + }, + { + "size": "1", + "name": "" + } + ], + "unknown_rank": false + }, + "name": "x:0" + } + }, + "outputs": { + "x": { + "dtype": "DT_FLOAT", + "tensor_shape": { + "dim": [ + { + "size": "-1", + "name": "" + }, + { + "size": "1", + "name": "" + } + ], + "unknown_rank": false + }, + "name": "y:0" + } + }, + "method_name": "tensorflow/serving/predict" + }, + "regress_x_to_y": { + "inputs": { + "inputs": { + "dtype": "DT_STRING", + "tensor_shape": { + "dim": [], + "unknown_rank": true + }, + "name": "tf_example:0" + } + }, + "outputs": { + "outputs": { + "dtype": "DT_FLOAT", + "tensor_shape": { + "dim": [ + { + "size": "-1", + "name": "" + }, + { + "size": "1", + "name": "" + } + ], + "unknown_rank": false + }, + "name": "y:0" + } + }, + 
"method_name": "tensorflow/serving/regress" + } + } +} +} +} + +``` + +Send a request +```bash +make curl_rest +``` + +You should see a response: +``` +{ + "predictions": [2.5, 3.0, 4.5] +} +``` + + +## REST - chained model + +Run the following commands in different terminals. + +Start tensorflow serving model +```bash +make run_tensorflow_serving +``` + +Start the executor locally. +```bash +make run_rest_executor_chain +``` + +This will run against the SeldonDeployment with 2 Tensorflow models one after the other: + +```JSON +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: tensorflow/serving:latest + name: half_plus_two + graph: + endpoint: + type: REST + service_host: 0.0.0.0 + service_port: 8501 + name: half_plus_two + type: MODEL + children: + - endpoint: + type: REST + service_host: 0.0.0.0 + service_port: 8501 + name: half_plus_two + type: MODEL + labels: + version: v1 + name: example + replicas: 1 + +``` + +Send a request +```bash +make curl_rest +``` + +You should see a response: +``` +{ + "predictions": [3.25, 3.5, 4.25] +} +``` + +## gRPC + +Run the following commands in different terminals. + +Start tensorflow serving model +```bash +make run_tensorflow_serving +``` + +Start the executor locally. 
+```bash +make run_grpc_executor +``` + +This will run the model shown below: + +```JSON +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: tensorflow/serving:latest + name: half_plus_two + graph: + children: [] + endpoint: + type: GRPC + service_host: 0.0.0.0 + service_port: 8500 + name: half_plus_two + type: MODEL + labels: + version: v1 + name: example + replicas: 1 +``` + +Check Status of model +```bash +make grpc_status +``` + +You should see a response: +``` +{ + "model_version_status": [ + { + "version": "123", + "state": "AVAILABLE", + "status": { + + } + } + ] +} +``` + +Check model metadata +```bash +make grpc_metadata +``` + +You should see a reponse: +``` +{ + "modelSpec": { + "name": "half_plus_two", + "version": "123" + }, + "metadata": { + "signature_def": { + "@type": "type.googleapis.com/tensorflow.serving.SignatureDefMap", + "signatureDef": { + "classify_x_to_y": { + "inputs": { + "inputs": { + "name": "tf_example:0", + "dtype": "DT_STRING", + "tensorShape": { + "unknownRank": true + } + } + }, + "outputs": { + "scores": { + "name": "y:0", + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "-1" + }, + { + "size": "1" + } + ] + } + } + }, + "methodName": "tensorflow/serving/classify" + }, + "regress_x2_to_y3": { + "inputs": { + "inputs": { + "name": "x2:0", + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "-1" + }, + { + "size": "1" + } + ] + } + } + }, + "outputs": { + "outputs": { + "name": "y3:0", + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "-1" + }, + { + "size": "1" + } + ] + } + } + }, + "methodName": "tensorflow/serving/regress" + }, + "regress_x_to_y": { + "inputs": { + "inputs": { + "name": "tf_example:0", + "dtype": "DT_STRING", + "tensorShape": { + "unknownRank": true + 
} + } + }, + "outputs": { + "outputs": { + "name": "y:0", + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "-1" + }, + { + "size": "1" + } + ] + } + } + }, + "methodName": "tensorflow/serving/regress" + }, + "regress_x_to_y2": { + "inputs": { + "inputs": { + "name": "tf_example:0", + "dtype": "DT_STRING", + "tensorShape": { + "unknownRank": true + } + } + }, + "outputs": { + "outputs": { + "name": "y2:0", + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "-1" + }, + { + "size": "1" + } + ] + } + } + }, + "methodName": "tensorflow/serving/regress" + }, + "serving_default": { + "inputs": { + "x": { + "name": "x:0", + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "-1" + }, + { + "size": "1" + } + ] + } + } + }, + "outputs": { + "x": { + "name": "y:0", + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "-1" + }, + { + "size": "1" + } + ] + } + } + }, + "methodName": "tensorflow/serving/predict" + } + } + } + } +} + +``` + +Send a request +```bash +make grpc_test +``` + +You should see a response: +``` +{ + "outputs": { + "x": { + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "3" + } + ] + }, + "floatVal": [ + 2.5, + 3, + 3.5 + ] + } + }, + "modelSpec": { + "name": "half_plus_two", + "version": "123", + "signatureName": "serving_default" + } +} +``` + + +## gRPC Chained + +Run the following commands in different terminals. + +Start tensorflow serving model +```bash +make run_tensorflow_serving +``` + +Start the executor locally. 
+```bash +make run_grpc_executor_chain +``` + +This will run the model shown below: + +```JSON +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: tensorflow/serving:latest + name: half_plus_two + graph: + endpoint: + type: GRPC + service_host: 0.0.0.0 + service_port: 8500 + name: half_plus_two + type: MODEL + children: + - endpoint: + type: GRPC + service_host: 0.0.0.0 + service_port: 8500 + name: half_plus_two + type: MODEL + labels: + version: v1 + name: example + replicas: 1 +``` + +Send a request +```bash +make grpc_test +``` + +You should see a response: +``` +{ + "outputs": { + "x": { + "dtype": "DT_FLOAT", + "tensorShape": { + "dim": [ + { + "size": "3" + } + ] + }, + "floatVal": [ + 3.25, + 3.5, + 3.75 + ] + } + }, + "modelSpec": { + "name": "half_plus_two", + "version": "123", + "signatureName": "serving_default" + } +} +``` + diff --git a/executor/samples/local/tfserving/convert.py b/executor/samples/local/tfserving/convert.py new file mode 100644 index 0000000000..6658ece11c --- /dev/null +++ b/executor/samples/local/tfserving/convert.py @@ -0,0 +1,8 @@ +import tensorflow as tf +from google.protobuf import json_format + +loaded = tf.saved_model.load("./saved_model_half_plus_two_cpu/00000123") +print(list(loaded.signatures.keys())) +print(loaded) +print(loaded.signatures["serving_default"].inputs[0]) +#json_string = json_format.MessageToJson(graph_def) diff --git a/executor/samples/local/tfserving/model/00000123/assets/foo.txt b/executor/samples/local/tfserving/model/00000123/assets/foo.txt new file mode 100644 index 0000000000..f9ff036688 --- /dev/null +++ b/executor/samples/local/tfserving/model/00000123/assets/foo.txt @@ -0,0 +1 @@ +asset-file-contents \ No newline at end of file diff --git 
a/executor/samples/local/tfserving/model/00000123/saved_model.pb b/executor/samples/local/tfserving/model/00000123/saved_model.pb new file mode 100644 index 0000000000..d6bbe0bd3a Binary files /dev/null and b/executor/samples/local/tfserving/model/00000123/saved_model.pb differ diff --git a/executor/samples/local/tfserving/model/00000123/variables/variables.data-00000-of-00001 b/executor/samples/local/tfserving/model/00000123/variables/variables.data-00000-of-00001 new file mode 100644 index 0000000000..a378d172b6 Binary files /dev/null and b/executor/samples/local/tfserving/model/00000123/variables/variables.data-00000-of-00001 differ diff --git a/executor/samples/local/tfserving/model/00000123/variables/variables.index b/executor/samples/local/tfserving/model/00000123/variables/variables.index new file mode 100644 index 0000000000..7dbd0d0159 Binary files /dev/null and b/executor/samples/local/tfserving/model/00000123/variables/variables.index differ diff --git a/executor/samples/local/tfserving/model_grpc.yaml b/executor/samples/local/tfserving/model_grpc.yaml new file mode 100644 index 0000000000..095b0404df --- /dev/null +++ b/executor/samples/local/tfserving/model_grpc.yaml @@ -0,0 +1,28 @@ +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: tensorflow/serving:latest + name: half_plus_two + graph: + children: [] + endpoint: + type: GRPC + service_host: 0.0.0.0 + service_port: 8500 + name: half_plus_two + type: MODEL + labels: + version: v1 + name: example + replicas: 1 diff --git a/executor/samples/local/tfserving/model_grpc_chain.yaml b/executor/samples/local/tfserving/model_grpc_chain.yaml new file mode 100644 index 0000000000..ca72f5a4e1 --- /dev/null +++ b/executor/samples/local/tfserving/model_grpc_chain.yaml @@ -0,0 +1,34 @@ +apiVersion: 
machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: tensorflow/serving:latest + name: half_plus_two + graph: + endpoint: + type: GRPC + service_host: 0.0.0.0 + service_port: 8500 + name: half_plus_two + type: MODEL + children: + - endpoint: + type: GRPC + service_host: 0.0.0.0 + service_port: 8500 + name: half_plus_two + type: MODEL + labels: + version: v1 + name: example + replicas: 1 diff --git a/executor/samples/local/tfserving/model_rest.yaml b/executor/samples/local/tfserving/model_rest.yaml new file mode 100644 index 0000000000..297cf89846 --- /dev/null +++ b/executor/samples/local/tfserving/model_rest.yaml @@ -0,0 +1,28 @@ +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: tensorflow/serving:latest + name: half_plus_two + graph: + children: [] + endpoint: + type: REST + service_host: 0.0.0.0 + service_port: 8501 + name: half_plus_two + type: MODEL + labels: + version: v1 + name: example + replicas: 1 diff --git a/executor/samples/local/tfserving/model_rest_chain.yaml b/executor/samples/local/tfserving/model_rest_chain.yaml new file mode 100644 index 0000000000..7351a96483 --- /dev/null +++ b/executor/samples/local/tfserving/model_rest_chain.yaml @@ -0,0 +1,34 @@ +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: tensorflow/serving:latest + name: half_plus_two + graph: + endpoint: + type: REST + service_host: 0.0.0.0 + service_port: 8501 + 
name: half_plus_two + type: MODEL + children: + - endpoint: + type: REST + service_host: 0.0.0.0 + service_port: 8501 + name: half_plus_two + type: MODEL + labels: + version: v1 + name: example + replicas: 1 diff --git a/executor/samples/local/tracing/Makefile b/executor/samples/local/tracing/Makefile new file mode 100644 index 0000000000..9118665cf1 --- /dev/null +++ b/executor/samples/local/tracing/Makefile @@ -0,0 +1,36 @@ +BASE=../../.. + +## REST + +run_rest_executor: + JAEGER_SAMPLER_TYPE=const JAEGER_SAMPLER_PARAM=1 ${BASE}/executor --sdep seldon-model --namespace default --predictor example --file ./model_rest.yaml --http_port 8000 + + +run_dummy_rest_model: + cd ${BASE}/../examples/models/mean_classifier && make run_rest_local + +curl_rest: + curl -v localhost:8000/api/v0.1/predictions -d '{"data":{"ndarray":[[1.0,2.0]]}}' + +curl_status: + curl -v localhost:8000/api/v0.1/status/classifier + +curl_metadata: + curl -v localhost:8000/api/v0.1/metadata/classifier + +run_jaeger: + docker run -p 6831:6831/udp -p 16686:16686 jaegertracing/all-in-one:latest + + +## GRPC + +run_grpc_executor: + JAEGER_SAMPLER_TYPE=const JAEGER_SAMPLER_PARAM=1 ${BASE}/executor --sdep seldon-model --namespace default --predictor example --file ./model_grpc.yaml --http_port 8000 --grpc_port 5000 --transport grpc + +run_dummy_grpc_model: + cd ${BASE}/../examples/models/mean_classifier && make run_grpc_local + +grpc_test: + cd ${BASE}/proto && grpcurl -d '{"data":{"ndarray":[[1.0,2.0]]}}' -plaintext -proto ./prediction.proto 0.0.0.0:5000 seldon.protos.Seldon/Predict + + diff --git a/executor/samples/local/tracing/README.md b/executor/samples/local/tracing/README.md new file mode 100644 index 0000000000..b129fa1def --- /dev/null +++ b/executor/samples/local/tracing/README.md @@ -0,0 +1,75 @@ +# Test Executor with Open Tracing + +## REST + +Run the following commands in different terminals. + +Start all-in-one Jaeger. + +```bash +make run_jaeger +``` + +Start the executor locally. 
+```bash +make run_rest_executor +``` + +Start a dummy REST model locally. +```bash +make run_dummy_rest_model +``` + +Send a request +```bash +make curl_rest +``` + +Check Jaeger UI for trace at http://localhost:16686 + +Example: + +![jarget_rest](jaeger_trace_rest.png) + + +You can do the same for status and metadata calls + +```bash +make curl_status +``` + +```bash +make curl_metadata +``` + +## gRPC + +Run the following commands in different terminals. + +Start all-in-one Jaeger. + +```bash +make run_jaeger +``` + +Start the executor locally. +```bash +make run_grpc_executor +``` + +Start a dummy REST model locally. +```bash +make run_dummy_grpc_model +``` + +Send a request +```bash +make grpc_test +``` + +Check Jaeger UI for trace at http://localhost:16686 + +Example: + +![jarget_grpc](jaeger_trace_grpc.png) + diff --git a/executor/samples/local/tracing/jaeger_trace_grpc.png b/executor/samples/local/tracing/jaeger_trace_grpc.png new file mode 100644 index 0000000000..f27d8f779b Binary files /dev/null and b/executor/samples/local/tracing/jaeger_trace_grpc.png differ diff --git a/executor/samples/local/tracing/jaeger_trace_rest.png b/executor/samples/local/tracing/jaeger_trace_rest.png new file mode 100644 index 0000000000..7d9a4a2803 Binary files /dev/null and b/executor/samples/local/tracing/jaeger_trace_rest.png differ diff --git a/executor/samples/local/tracing/model_grpc.yaml b/executor/samples/local/tracing/model_grpc.yaml new file mode 100644 index 0000000000..265f0e78e4 --- /dev/null +++ b/executor/samples/local/tracing/model_grpc.yaml @@ -0,0 +1,28 @@ +apiVersion: machinelearning.seldon.io/v1alpha2 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier_rest:1.3 + name: classifier + graph: + children: [] + endpoint: + type: GRPC + service_host: 0.0.0.0 + 
service_port: 9000 + name: classifier + type: MODEL + labels: + version: v1 + name: example + replicas: 1 diff --git a/executor/samples/local/tracing/model_rest.yaml b/executor/samples/local/tracing/model_rest.yaml new file mode 100644 index 0000000000..0bece0ef37 --- /dev/null +++ b/executor/samples/local/tracing/model_rest.yaml @@ -0,0 +1,28 @@ +apiVersion: machinelearning.seldon.io/v1alpha2 +kind: SeldonDeployment +metadata: + labels: + app: seldon + name: seldon-model +spec: + annotations: + seldon.io/executor: "true" + name: test-deployment + predictors: + - componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier_rest:1.3 + name: classifier + graph: + children: [] + endpoint: + type: REST + service_host: 0.0.0.0 + service_port: 9000 + name: classifier + type: MODEL + labels: + version: v1 + name: example + replicas: 1 diff --git a/helm-charts/seldon-abtest/templates/ab_test_1pod.json b/helm-charts/seldon-abtest/templates/ab_test_1pod.json index ce3c77ebd8..a517eb90e8 100644 --- a/helm-charts/seldon-abtest/templates/ab_test_1pod.json +++ b/helm-charts/seldon-abtest/templates/ab_test_1pod.json @@ -22,7 +22,7 @@ "spec": { "containers": [ { - "image": "{{ .Values.modelb.image.name }}", + "image": "{{ .Values.modela.image.name }}_{{ lower .Values.protocol }}:{{ .Values.modela.image.version }}", "imagePullPolicy": "IfNotPresent", "name": "{{ .Values.modela.name }}", "resources": { @@ -32,7 +32,7 @@ } }, { - "image": "{{ .Values.modelb.image.name }}", + "image": "{{ .Values.modelb.image.name }}_{{ lower .Values.protocol }}:{{ .Values.modelb.image.version }}", "imagePullPolicy": "IfNotPresent", "name": "{{ .Values.modelb.name }}", "resources": { @@ -47,7 +47,9 @@ }], "graph": { "name": "{{ .Release.Name }}", - "endpoint":{}, + "endpoint":{ + "type":"{{ .Values.protocol }}" + }, "implementation":"RANDOM_ABTEST", "parameters": [ { @@ -60,7 +62,7 @@ { "name": "{{ .Values.modela.name }}", "endpoint":{ - "type":"{{ .Values.modela.endpoint }}" + 
"type":"{{ .Values.protocol }}" }, "type":"MODEL", "children":[] @@ -68,7 +70,7 @@ { "name": "{{ .Values.modelb.name }}", "endpoint":{ - "type":"{{ .Values.modela.endpoint }}" + "type":"{{ .Values.protocol }}" }, "type":"MODEL", "children":[] diff --git a/helm-charts/seldon-abtest/templates/ab_test_2pods.json b/helm-charts/seldon-abtest/templates/ab_test_2pods.json index 4f038dbd3c..e849db0002 100644 --- a/helm-charts/seldon-abtest/templates/ab_test_2pods.json +++ b/helm-charts/seldon-abtest/templates/ab_test_2pods.json @@ -22,7 +22,7 @@ "spec": { "containers": [ { - "image": "{{ .Values.modelb.image.name }}", + "image": "{{ .Values.modela.image.name }}_{{ lower .Values.protocol }}:{{ .Values.modela.image.version }}", "imagePullPolicy": "IfNotPresent", "name": "{{ .Values.modela.name }}", "resources": { @@ -41,8 +41,8 @@ }, "spec":{ "containers":[ - { - "image": "{{ .Values.modelb.image.name }}", + { + "image": "{{ .Values.modela.image.name }}_{{ lower .Values.protocol }}:{{ .Values.modela.image.version }}", "imagePullPolicy": "IfNotPresent", "name": "{{ .Values.modelb.name }}", "resources": { @@ -57,7 +57,9 @@ }], "graph": { "name": "{{ .Release.Name }}", - "endpoint":{}, + "endpoint":{ + "type":"{{ .Values.protocol }}" + }, "implementation":"RANDOM_ABTEST", "parameters": [ { @@ -70,7 +72,7 @@ { "name": "{{ .Values.modela.name }}", "endpoint":{ - "type":"REST" + "type":"{{ .Values.protocol }}" }, "type":"MODEL", "children":[] @@ -78,7 +80,7 @@ { "name": "{{ .Values.modelb.name }}", "endpoint":{ - "type":"REST" + "type":"{{ .Values.protocol }}" }, "type":"MODEL", "children":[] diff --git a/helm-charts/seldon-abtest/values.yaml b/helm-charts/seldon-abtest/values.yaml index 12ae08888a..400c8f1948 100644 --- a/helm-charts/seldon-abtest/values.yaml +++ b/helm-charts/seldon-abtest/values.yaml @@ -1,14 +1,15 @@ +protocol: REST separate_pods: true modela: image: - name: seldonio/mock_classifier:1.0 - endpoint: REST + name: seldonio/mock_classifier + version: 1.3 name: 
classifier-1 # resources: { "requests": { "memory": "1Mi" }} modelb: image: - name: seldonio/mock_classifier:1.0 - endpoint: REST + name: seldonio/mock_classifier + version: 1.3 name: classifier-2 traffic_modela_percentage: 0.5 replicas: 1 diff --git a/helm-charts/seldon-core-analytics/files/grafana/configs/predictions-analytics-dashboard.json b/helm-charts/seldon-core-analytics/files/grafana/configs/predictions-analytics-dashboard.json index 130c40ac71..a657cbf867 100644 --- a/helm-charts/seldon-core-analytics/files/grafana/configs/predictions-analytics-dashboard.json +++ b/helm-charts/seldon-core-analytics/files/grafana/configs/predictions-analytics-dashboard.json @@ -1,1031 +1,1141 @@ { - "__requires": [ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 4, + "iteration": 1579424925772, + "links": [], + "panels": [ { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "4.3.2" + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 40, + "panels": [], + "repeat": null, + "title": "Heading", + "type": "row" }, { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "" + "content": "
\n Seldon Core API Dashboard\n
", + "datasource": null, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 27, + "links": [], + "mode": "html", + "options": {}, + "title": "", + "type": "text" }, { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 41, + "panels": [], + "repeat": null, + "title": "Global Counts", + "type": "row" }, { - "type": "panel", - "id": "singlestat", - "name": "Singlestat", - "version": "" + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 5 + }, + "id": 16, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "round(sum(irate(seldon_api_executor_server_requests_seconds_count[1m])),0.001)", + "format": "time_series", + "instant": false, + "intervalFactor": 2, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Global Request Rate", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" 
}, { - "type": "panel", - "id": "text", - "name": "Text", - "version": "" - } - ], - "annotations": { - "list": [] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "id": null, - "links": [], - "refresh": "5s", - "rows": [ + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 6, + "y": 5 + }, + "id": 17, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(seldon_api_executor_server_requests_seconds_count{status!~\"5.*\"}[1m])) / sum(rate(seldon_api_executor_server_requests_seconds_count[1m]))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Success", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, { - "collapse": false, - "height": 67, - "panels": [ + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + 
"datasource": "prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 12, + "y": 5 + }, + "id": 18, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ { - "content": "
\n Seldon Core API Dashboard\n
", - "id": 27, - "links": [], - "mode": "html", - "span": 12, - "title": "", - "type": "text" + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 } ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Heading", - "titleSize": "h6" + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(seldon_api_executor_server_requests_seconds_count{status=~\"4.*\"}[1m])) ", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "4xxs", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" }, { - "collapse": false, - "height": "93", - "panels": [ + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 18, + "y": 5 + }, + "id": 19, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "prometheus", - "format": "ops", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - 
"thresholdLabels": false, - "thresholdMarkers": true - }, - "id": 16, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "round(sum(irate(seldon_api_engine_server_requests_seconds_count[1m])), 0.001)", - "format": "time_series", - "intervalFactor": 2, - "refId": "A", - "step": 20 - } - ], - "thresholds": "", - "title": "Global Request Rate", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" + "name": "value to text", + "value": 1 }, { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "prometheus", - "format": "percentunit", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "id": 17, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - 
"lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(rate(seldon_api_engine_server_requests_seconds_count{status!~\"5.*\"}[1m])) / sum(rate(seldon_api_engine_server_requests_seconds_count[1m]))", - "format": "time_series", - "intervalFactor": 2, - "refId": "A", - "step": 20 - } - ], - "thresholds": "", - "title": "Success", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(seldon_api_executor_server_requests_seconds_count{status=~\"5.*\"}[1m])) ", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "5xxs", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 42, + "panels": [], + "repeat": "deployment", + "title": "Deployment Counts $deployment", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + 
"gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 9 + }, + "id": 36, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 }, { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "prometheus", - "format": "ops", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "id": 18, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(irate(seldon_api_engine_server_requests_seconds_count{status=~\"4.*\"}[1m])) ", - "format": "time_series", - "intervalFactor": 2, - "refId": "A", - "step": 20 - } - ], - "thresholds": "", - "title": "4xxs", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", 
+ "targets": [ + { + "expr": "round(sum(irate(seldon_api_executor_client_requests_seconds_count{deployment_name=~'$deployment'}[1m])), 0.001)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Request Rate ($deployment)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 6, + "y": 9 + }, + "id": 37, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 }, { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "prometheus", - "format": "ops", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "id": 19, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": 
"sum(irate(seldon_api_engine_server_requests_seconds_count{status=~\"5.*\"}[1m])) ", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "refId": "A", - "step": 20 - } - ], - "thresholds": "", - "title": "5xxs", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" + "name": "range to text", + "value": 2 } ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Global Counts", - "titleSize": "h6" + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(seldon_api_executor_client_requests_seconds_count{deployment_name=~\"$deployment\",status!~\"5.*\"}[1m])) / sum(rate(seldon_api_executor_client_requests_seconds_count{deployment_name=~\"$deployment\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Success ($deployment)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" }, { - "collapse": false, - "height": "93", - "panels": [ + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 12, + "y": 9 + }, + "id": 
38, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "prometheus", - "format": "ops", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "id": 36, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "round(sum(irate(seldon_api_engine_client_requests_seconds_count{deployment_name=~'$deployment'}[1m])), 0.001)", - "format": "time_series", - "intervalFactor": 2, - "refId": "A", - "step": 20 - } - ], - "thresholds": "", - "title": "Request Rate ($deployment)", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" + "name": "value to text", + "value": 1 }, { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "prometheus", - "format": "percentunit", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "id": 37, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to 
text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(rate(seldon_api_engine_client_requests_seconds_count{deployment_name=~\"$deployment\",status!~\"5.*\"}[1m])) / sum(rate(seldon_api_engine_client_requests_seconds_count{deployment_name=~\"$deployment\"}[1m]))", - "format": "time_series", - "intervalFactor": 2, - "refId": "A", - "step": 20 - } - ], - "thresholds": "", - "title": "Success ($deployment)", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(seldon_api_executor_client_requests_seconds_count{deployment_name=~\"$deployment\",status=~\"4.*\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "4xxs ($deployment)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + 
"colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 18, + "y": 9 + }, + "id": 39, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 }, { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "prometheus", - "format": "ops", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "id": 38, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(irate(seldon_api_engine_client_requests_seconds_count{deployment_name=~\"$deployment\",status=~\"4.*\"}[1m]))", - "format": "time_series", - "intervalFactor": 2, - "refId": "A", - "step": 20 - } - ], - "thresholds": "", - "title": "4xxs ($deployment)", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + 
"nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(seldon_api_executor_client_requests_seconds_count{deployment_name=~\"$deployment\",status=~\"5.*\"}[1m]))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "5xxs ($deployment)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 43, + "panels": [], + "repeat": null, + "title": "Models", + "type": "row" + }, + { + "content": "
\n Models\n
", + "datasource": null, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 8, + "links": [], + "mode": "html", + "options": {}, + "title": "", + "type": "text" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 44, + "panels": [], + "repeat": "model_image", + "title": "Model Metrics $model_image", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 17 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(seldon_api_executor_client_requests_seconds_count{model_name=~\"$model_name\",model_version=~\"$model_version\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\"}[5s])) by (model_name,predictor_name,predictor_version,model_image,model_version,service)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{predictor_name}}:{{predictor_version}} ({{model_name}} {{model_image}} : {{model_version}}) {{service}}", + "metric": "io_seldon_apife_api_rest_RestClientController_home_snapshot_75thPercentile", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Reqs/sec to $model_image", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": 
"time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true }, { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "prometheus", - "format": "ops", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "id": 39, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(irate(seldon_api_engine_client_requests_seconds_count{deployment_name=~\"$deployment\",status=~\"5.*\"}[1m]))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "refId": "A", - "step": 20 - } - ], - "thresholds": "", - "title": "5xxs ($deployment)", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true } ], - "repeat": "deployment", - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Deployment Counts", - "titleSize": "h6" + "yaxis": { + "align": false, + "alignLevel": null + } }, { - "collapse": false, - "height": "50", - "panels": [ + "aliasColors": {}, + "bars": false, + 
"dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 17 + }, + "hiddenSeries": false, + "id": 28, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ { - "content": "
\n Models\n
", - "id": 8, - "links": [], - "mode": "html", - "span": 12, - "title": "", - "type": "text" + "expr": "rate(seldon_api_model_feedback_reward_total{deployment_name=~\"$deployment\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_image=~\"$model_image\",model_version=~\"$model_version\",model_name=~\"$model_name\"}[1m])/rate(seldon_api_model_feedback_total{deployment_name=~\"$deployment\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_image=~\"$model_image\",model_version=~\"$model_version\",model_name=~\"$model_name\"}[1m])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{deployment_name}}/{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}} : {{model_version}}", + "metric": "", + "refId": "A", + "step": 2 } ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Predictive Units", - "titleSize": "h6" + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "$model_image Reward", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { - "collapse": false, - "height": 334, - "panels": [ + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 17 + }, + "hiddenSeries": false, + "id": 11, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": 
false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "prometheus", - "fill": 1, - "id": 7, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 4, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(seldon_api_engine_client_requests_seconds_count{model_name=~\"$model_name\",model_version=~\"$model_version\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\"}[1m])) by (model_name,predictor_name,predictor_version,model_image,model_version)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{predictor_name}}:{{predictor_version}} ({{model_name}} {{model_image}} : {{model_version}})", - "metric": "io_seldon_apife_api_rest_RestClientController_home_snapshot_75thPercentile", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Reqs/sec to $model_image", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - 
"min": null, - "show": true - } - ] + "expr": "histogram_quantile(0.5, sum(rate(seldon_api_executor_client_requests_seconds_bucket{service=~\"/Predict\", model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_name=~\"$model_name\",model_version=~\"$model_version\"}[20s])) by (predictor_name,predictor_version,model_name,model_image,model_version,le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}}: {{model_version}} (p50)", + "metric": "", + "refId": "E", + "step": 2 }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "prometheus", - "fill": 1, - "id": 28, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 4, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(seldon_api_model_feedback_reward_total{deployment_name=~\"$deployment\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_image=~\"$model_image\",model_version=~\"$model_version\",model_name=~\"$model_name\"}[1m])/rate(seldon_api_model_feedback_total{deployment_name=~\"$deployment\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_image=~\"$model_image\",model_version=~\"$model_version\",model_name=~\"$model_name\"}[1m])", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{deployment_name}}/{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}} : {{model_version}}", - "metric": "", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": 
null, - "timeShift": null, - "title": "$model_image Reward", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] + "expr": "histogram_quantile(0.75, sum(rate(seldon_api_executor_client_requests_seconds_bucket{service=~\".*[Pp]redict\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_name=~\"$model_name\",model_version=~\"$model_version\"}[20s])) by (predictor_name,predictor_version,model_name,model_image,model_version,service,le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}}:{{model_version}} {{service}} (p75)", + "metric": "", + "refId": "B", + "step": 2 }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "prometheus", - "fill": 1, - "id": 11, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 4, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.5, sum(rate(seldon_api_engine_client_requests_seconds_bucket{uri=\"/predict\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_name=~\"$model_name\",model_version=~\"$model_version\"}[20s])) by 
(predictor_name,predictor_version,model_name,model_image,model_version,le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}}: {{model_version}} (p50)", - "metric": "", - "refId": "E", - "step": 2 - }, - { - "expr": "histogram_quantile(0.75, sum(rate(seldon_api_engine_client_requests_seconds_bucket{uri=\"/predict\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_name=~\"$model_name\",model_version=~\"$model_version\"}[20s])) by (predictor_name,predictor_version,model_name,model_image,model_version,le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}}:{{model_version}} (p75)", - "metric": "", - "refId": "B", - "step": 2 - }, - { - "expr": "histogram_quantile(0.9, sum(rate(seldon_api_engine_client_requests_seconds_bucket{uri=\"/predict\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_name=~\"$model_name\",model_version=~\"$model_version\"}[20s])) by (predictor_name,predictor_version,model_name,model_image,model_version,le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}}:{{model_version}} (p90)", - "metric": "", - "refId": "A", - "step": 2 - }, - { - "expr": "histogram_quantile(0.95, sum(rate(seldon_api_engine_client_requests_seconds_bucket{uri=\"/predict\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_name=~\"$model_name\",model_version=~\"$model_version\"}[20s])) by (predictor_name,predictor_version,model_name,model_image,model_version,le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{predictor_name}}:{{predictor_version}} {{model_name}} 
{{model_image}}:{{model_version}} (p95)", - "metric": "", - "refId": "C", - "step": 2 - }, - { - "expr": "histogram_quantile(0.99, sum(rate(seldon_api_engine_client_requests_seconds_bucket{uri=\"/predict\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_name=~\"$model_name\",model_version=~\"$model_version\"}[20s])) by (predictor_name,predictor_version,model_name,model_image,model_version,le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}}:{{model_version}} (p99)", - "metric": "", - "refId": "D", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "$model_image Latency", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] + "expr": "histogram_quantile(0.9, sum(rate(seldon_api_executor_client_requests_seconds_bucket{service=~\".*[Pp]redict\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_name=~\"$model_name\",model_version=~\"$model_version\"}[20s])) by (predictor_name,predictor_version,model_name,model_image,model_version, service,le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}}:{{model_version}} {{service}} (p90)", + "metric": "", + "refId": "A", + "step": 2 + }, + { + "expr": "histogram_quantile(0.95, 
sum(rate(seldon_api_executor_client_requests_seconds_bucket{service=~\".*[Pp]redict\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_name=~\"$model_name\",model_version=~\"$model_version\"}[20s])) by (predictor_name,predictor_version,model_name,model_image,model_version,service,le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}}:{{model_version}} {{service}} (p95)", + "metric": "", + "refId": "C", + "step": 2 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(seldon_api_executor_client_requests_seconds_bucket{service=~\".*[Pp]redict\",model_image=~\"$model_image\",predictor_name=~\"$predictor\",predictor_version=~\"$version\",model_name=~\"$model_name\",model_version=~\"$model_version\"}[20s])) by (predictor_name,predictor_version,model_name,model_image,model_version,service,le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{predictor_name}}:{{predictor_version}} {{model_name}} {{model_image}}:{{model_version}} {{service}} (p99)", + "metric": "", + "refId": "D", + "step": 2 } ], - "repeat": "model_image", - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Model Metrics", - "titleSize": "h6" + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "$model_image Latency", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], - "schemaVersion": 14, + 
"refresh": "5s", + "schemaVersion": 21, "style": "dark", - "tags": [], + "tags": [ + "seldon" + ], "templating": { "list": [ { "allValue": ".*", - "current": {}, + "current": { + "text": "All", + "value": [ + "$__all" + ] + }, "datasource": "prometheus", + "definition": "label_values(seldon_api_executor_client_requests_seconds_count,deployment_name)", "hide": 0, "includeAll": true, "label": null, "multi": true, "name": "deployment", "options": [], - "query": "label_values(seldon_api_engine_client_requests_seconds_count,deployment_name)", + "query": "label_values(seldon_api_executor_client_requests_seconds_count,deployment_name)", "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], @@ -1035,17 +1145,23 @@ }, { "allValue": ".*", - "current": {}, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, "datasource": "prometheus", + "definition": "label_values(seldon_api_executor_client_requests_seconds_count,predictor_name)", "hide": 0, "includeAll": true, "label": null, "multi": true, "name": "predictor", "options": [], - "query": "label_values(seldon_api_engine_client_requests_seconds_count,predictor_name)", + "query": "label_values(seldon_api_executor_client_requests_seconds_count,predictor_name)", "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], @@ -1055,17 +1171,23 @@ }, { "allValue": ".*", - "current": {}, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, "datasource": "prometheus", + "definition": "label_values(seldon_api_executor_client_requests_seconds_count,predictor_version)", "hide": 0, "includeAll": true, "label": null, "multi": true, "name": "version", "options": [], - "query": "label_values(seldon_api_engine_client_requests_seconds_count,predictor_version)", + "query": "label_values(seldon_api_executor_client_requests_seconds_count,predictor_version)", "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 0, 
"tagValuesQuery": "", "tags": [], @@ -1075,17 +1197,23 @@ }, { "allValue": ".*", - "current": {}, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, "datasource": "prometheus", + "definition": "label_values(seldon_api_executor_client_requests_seconds_count,model_name)", "hide": 0, "includeAll": true, "label": null, "multi": true, "name": "model_name", "options": [], - "query": "label_values(seldon_api_engine_client_requests_seconds_count,model_name)", + "query": "label_values(seldon_api_executor_client_requests_seconds_count,model_name)", "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], @@ -1095,17 +1223,23 @@ }, { "allValue": ".*", - "current": {}, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, "datasource": "prometheus", + "definition": "label_values(seldon_api_executor_client_requests_seconds_count,model_image)", "hide": 0, "includeAll": true, "label": null, "multi": true, "name": "model_image", "options": [], - "query": "label_values(seldon_api_engine_client_requests_seconds_count,model_image)", + "query": "label_values(seldon_api_executor_client_requests_seconds_count,model_image)", "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], @@ -1115,17 +1249,23 @@ }, { "allValue": ".*", - "current": {}, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, "datasource": "prometheus", + "definition": "label_values(seldon_api_executor_client_requests_seconds_count,model_version)", "hide": 0, "includeAll": true, "label": null, "multi": true, "name": "model_version", "options": [], - "query": "label_values(seldon_api_engine_client_requests_seconds_count,model_version)", + "query": "label_values(seldon_api_executor_client_requests_seconds_count,model_version)", "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", "tags": [], @@ -1166,5 +1306,6 @@ }, "timezone": "browser", 
"title": "Prediction Analytics", + "uid": "U1cSDzyZz", "version": 1 } diff --git a/helm-charts/seldon-core-operator/templates/customresourcedefinition_seldondeployments.machinelearning.seldon.io.yaml b/helm-charts/seldon-core-operator/templates/customresourcedefinition_seldondeployments.machinelearning.seldon.io.yaml index 377224f04c..1806d71d75 100644 --- a/helm-charts/seldon-core-operator/templates/customresourcedefinition_seldondeployments.machinelearning.seldon.io.yaml +++ b/helm-charts/seldon-core-operator/templates/customresourcedefinition_seldondeployments.machinelearning.seldon.io.yaml @@ -2598,6 +2598,16 @@ spec: type: string implementation: type: string + logger: + description: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version. + properties: + mode: + description: What payloads to log + type: string + url: + description: URL to send request logging CloudEvents + type: string + type: object methods: items: type: string @@ -2637,6 +2647,16 @@ spec: type: string implementation: type: string + logger: + description: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version. + properties: + mode: + description: What payloads to log + type: string + url: + description: URL to send request logging CloudEvents + type: string + type: object methods: items: type: string @@ -2676,6 +2696,16 @@ spec: type: string implementation: type: string + logger: + description: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version. + properties: + mode: + description: What payloads to log + type: string + url: + description: URL to send request logging CloudEvents + type: string + type: object methods: items: type: string @@ -2715,6 +2745,16 @@ spec: type: string implementation: type: string + logger: + description: Request/response payload logging. 
v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version. + properties: + mode: + description: What payloads to log + type: string + url: + description: URL to send request logging CloudEvents + type: string + type: object methods: items: type: string @@ -2754,6 +2794,16 @@ spec: type: string implementation: type: string + logger: + description: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version. + properties: + mode: + description: What payloads to log + type: string + url: + description: URL to send request logging CloudEvents + type: string + type: object methods: items: type: string diff --git a/helm-charts/seldon-core-operator/templates/deployment_seldon-controller-manager.yaml b/helm-charts/seldon-core-operator/templates/deployment_seldon-controller-manager.yaml index 534eb33163..5f02bee22b 100644 --- a/helm-charts/seldon-core-operator/templates/deployment_seldon-controller-manager.yaml +++ b/helm-charts/seldon-core-operator/templates/deployment_seldon-controller-manager.yaml @@ -71,6 +71,22 @@ spec: value: '{{ .Values.istio.gateway }}' - name: ISTIO_TLS_MODE value: '{{ .Values.istio.tlsMode }}' + - name: USE_EXECUTOR + value: '{{ .Values.executor.enabled }}' + - name: EXECUTOR_CONTAINER_IMAGE_AND_VERSION + value: '{{ .Values.executor.image.registry }}/{{ .Values.executor.image.repository }}:{{ .Values.executor.image.tag }}' + - name: EXECUTOR_CONTAINER_IMAGE_PULL_POLICY + value: '{{ .Values.executor.image.pullPolicy }}' + - name: EXECUTOR_PROMETHEUS_PATH + value: '{{ .Values.executor.prometheus.path }}' + - name: EXECUTOR_SERVER_GRPC_PORT + value: '{{ .Values.engine.grpc.port }}' + - name: EXECUTOR_SERVER_PORT + value: '{{ .Values.executor.port }}' + - name: EXECUTOR_CONTAINER_USER + value: '{{ .Values.executor.user }}' + - name: EXECUTOR_CONTAINER_SERVICE_ACCOUNT_NAME + value: '{{ .Values.executor.serviceAccount.name }}' image: '{{ 
.Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}' imagePullPolicy: '{{ .Values.image.pullPolicy }}' name: manager diff --git a/helm-charts/seldon-core-operator/templates/webhook.yaml b/helm-charts/seldon-core-operator/templates/webhook.yaml index 28b39c6d8d..71d0918bbb 100644 --- a/helm-charts/seldon-core-operator/templates/webhook.yaml +++ b/helm-charts/seldon-core-operator/templates/webhook.yaml @@ -3,20 +3,6 @@ {{- $cert := genSignedCert "seldon-webhook-service" nil $altNames 365 $ca -}} --- -{{- if not .Values.certManager.enabled -}} -apiVersion: v1 -data: - ca.crt: '{{ $ca.Cert | b64enc }}' - tls.crt: '{{ $cert.Cert | b64enc }}' - tls.key: '{{ $cert.Key | b64enc }}' -kind: Secret -metadata: - name: seldon-webhook-server-cert - namespace: '{{ .Release.Namespace }}' -type: kubernetes.io/tls -{{- end }} ---- - apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration metadata: @@ -309,3 +295,17 @@ webhooks: - UPDATE resources: - seldondeployments +--- + +{{- if not .Values.certManager.enabled -}} +apiVersion: v1 +data: + ca.crt: '{{ $ca.Cert | b64enc }}' + tls.crt: '{{ $cert.Cert | b64enc }}' + tls.key: '{{ $cert.Key | b64enc }}' +kind: Secret +metadata: + name: seldon-webhook-server-cert + namespace: '{{ .Release.Namespace }}' +type: kubernetes.io/tls +{{- end }} diff --git a/helm-charts/seldon-core-operator/values.yaml b/helm-charts/seldon-core-operator/values.yaml index 6645be9610..e0bdea5884 100644 --- a/helm-charts/seldon-core-operator/values.yaml +++ b/helm-charts/seldon-core-operator/values.yaml @@ -23,12 +23,25 @@ engine: logMessagesExternally: false port: 8000 prometheus: - path: prometheus - securityContext: - enabled: true + path: /prometheus serviceAccount: name: default user: 8888 +executor: + enabled: true + grpc: + port: 5001 + image: + pullPolicy: IfNotPresent + registry: docker.io + repository: seldonio/seldon-core-executor + tag: 1.0.2-SNAPSHOT + port: 8000 + prometheus: + path: 
/prometheus + serviceAccount: + name: default + user: 8888 image: pullPolicy: IfNotPresent registry: docker.io @@ -80,11 +93,11 @@ serviceAccount: name: seldon-manager singleNamespace: false storageInitializer: - cpuLimit: '1' - cpuRequest: 100m - image: gcr.io/kfserving/storage-initializer:0.2.1 - memoryLimit: 1Gi + image: gcr.io/kfserving/storage-initializer:0.2.2 memoryRequest: 100Mi + memoryLimit: 1Gi + cpuRequest: 100m + cpuLimit: '1' usageMetrics: enabled: false webhook: diff --git a/helm-charts/seldon-mab/templates/mab.json b/helm-charts/seldon-mab/templates/mab.json index 3e5b1b4b31..7e0d92850c 100644 --- a/helm-charts/seldon-mab/templates/mab.json +++ b/helm-charts/seldon-mab/templates/mab.json @@ -19,7 +19,7 @@ "spec": { "containers": [ { - "image": "{{ .Values.modela.image.name }}", + "image": "{{ .Values.modela.image.name }}_{{ lower .Values.protocol }}:{{ .Values.modela.image.version }}", "imagePullPolicy": "IfNotPresent", "name": "{{ .Values.modela.name }}", "resources": { @@ -33,8 +33,8 @@ { "spec":{ "containers":[ - { - "image": "{{ .Values.modelb.image.name }}", + { + "image": "{{ .Values.modelb.image.name }}_{{ lower .Values.protocol }}:{{ .Values.modelb.image.version }}", "imagePullPolicy": "IfNotPresent", "name": "{{ .Values.modelb.name }}", "resources": { @@ -50,7 +50,7 @@ { "spec":{ "containers": [{ - "image": "{{ .Values.mab.image.name }}", + "image": "{{ .Values.mab.image.name }}_{{ lower .Values.protocol }}:{{ .Values.mab.image.version }}", "name": "{{ .Values.mab.name }}" }], "terminationGracePeriodSeconds": 20 @@ -58,6 +58,9 @@ ], "graph": { "name": "{{ .Values.mab.name }}", + "endpoint": { + "type": "{{ .Values.protocol }}" + }, "type":"ROUTER", "parameters": [ { @@ -80,7 +83,7 @@ { "name": "{{ .Values.modela.name }}", "endpoint":{ - "type":"REST" + "type": "{{ .Values.protocol }}" }, "type":"MODEL", "children":[] @@ -88,7 +91,7 @@ { "name": "{{ .Values.modelb.name }}", "endpoint":{ - "type":"REST" + "type": "{{ .Values.protocol }}" }, 
"type":"MODEL", "children":[] diff --git a/helm-charts/seldon-mab/values.yaml b/helm-charts/seldon-mab/values.yaml index fbd026ecc0..d657699a06 100644 --- a/helm-charts/seldon-mab/values.yaml +++ b/helm-charts/seldon-mab/values.yaml @@ -1,16 +1,20 @@ +protocol: REST modela: image: - name: seldonio/mock_classifier:1.0 + name: seldonio/mock_classifier + version: 1.3 endpoint: REST name: classifier-1 modelb: image: - name: seldonio/mock_classifier:1.0 + name: seldonio/mock_classifier + version: 1.3 endpoint: REST name: classifier-2 mab: image: - name: seldonio/mab_epsilon_greedy:1.1 + name: seldonio/mab_epsilon_greedy + version: 1.3 name: eg-router branches: 2 epsilon: 0.2 @@ -38,4 +42,4 @@ engine: SELDON_LOG_REQUESTS: false SELDON_LOG_RESPONSES: false SELDON_LOG_MESSAGES_EXTERNALLY: false - SELDON_LOG_MESSAGE_TYPE: "seldon.message.pair" \ No newline at end of file + SELDON_LOG_MESSAGE_TYPE: "seldon.message.pair" diff --git a/helm-charts/seldon-single-model/templates/model.json b/helm-charts/seldon-single-model/templates/model.json index f897a8fddd..cea67ef5c8 100644 --- a/helm-charts/seldon-single-model/templates/model.json +++ b/helm-charts/seldon-single-model/templates/model.json @@ -18,7 +18,7 @@ "spec": { "containers": [ { - "image": "{{ .Values.model.image.name }}", + "image": "{{ .Values.model.image.name }}_{{ lower .Values.protocol }}:{{ .Values.model.image.version }}", "imagePullPolicy": "{{ .Values.model.image.pullPolicy }}", "name": "{{ .Values.model.name }}", "resources": {{ .Values.model.resources | toJson }}, @@ -41,7 +41,7 @@ "name": "{{ .Values.model.name }}", "type": "MODEL", "endpoint": { - "type": "REST" + "type": "{{ .Values.protocol }}" }}, "svcOrchSpec": { "resources": {{ .Values.engine.resources | toJson }}, diff --git a/helm-charts/seldon-single-model/values.yaml b/helm-charts/seldon-single-model/values.yaml index 2768100bbe..f8dc8fa97c 100644 --- a/helm-charts/seldon-single-model/values.yaml +++ b/helm-charts/seldon-single-model/values.yaml @@ 
-1,8 +1,10 @@ +protocol: REST name: my-model apiVersion: machinelearning.seldon.io/v1 model: image: - name: seldonio/mock_classifier:1.0 + name: seldonio/mock_classifier + version: 1.3 pullPolicy: IfNotPresent name: classifier resources: diff --git a/notebooks/helm_examples.ipynb b/notebooks/helm_examples.ipynb index 07389ad2c5..0a39d8019f 100644 --- a/notebooks/helm_examples.ipynb +++ b/notebooks/helm_examples.ipynb @@ -18,15 +18,41 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "## Serve Single Model" + "!kubectl create namespace seldon" ] }, { "cell_type": "code", "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Context \"kind-kind\" modified.\r\n" + ] + } + ], + "source": [ + "!kubectl config set-context $(kubectl config current-context) --namespace=seldon" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Serve Single REST Model" + ] + }, + { + "cell_type": "code", + "execution_count": 9, "metadata": { "scrolled": true }, @@ -36,7 +62,7 @@ "output_type": "stream", "text": [ "NAME: mymodel\r\n", - "LAST DEPLOYED: Mon Dec 2 12:36:13 2019\r\n", + "LAST DEPLOYED: Sat Jan 4 17:08:31 2020\r\n", "NAMESPACE: seldon\r\n", "STATUS: deployed\r\n", "REVISION: 1\r\n", @@ -50,7 +76,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": { "scrolled": true }, @@ -59,55 +85,55 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[04m\u001b[91m-\u001b[39;49;00m\u001b[04m\u001b[91m-\u001b[39;49;00m\u001b[04m\u001b[91m-\u001b[39;49;00m\r\n", - "\u001b[04m\u001b[91m#\u001b[39;49;00m \u001b[04m\u001b[91mS\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91mu\u001b[39;49;00m\u001b[04m\u001b[91mr\u001b[39;49;00m\u001b[04m\u001b[91mc\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91m:\u001b[39;49;00m 
\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91ml\u001b[39;49;00m\u001b[04m\u001b[91md\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91mn\u001b[39;49;00m\u001b[04m\u001b[91m-\u001b[39;49;00m\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91mi\u001b[39;49;00m\u001b[04m\u001b[91mn\u001b[39;49;00m\u001b[04m\u001b[91mg\u001b[39;49;00m\u001b[04m\u001b[91ml\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91m-\u001b[39;49;00m\u001b[04m\u001b[91mm\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91md\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91ml\u001b[39;49;00m\u001b[04m\u001b[91m/\u001b[39;49;00m\u001b[04m\u001b[91mt\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91mm\u001b[39;49;00m\u001b[04m\u001b[91mp\u001b[39;49;00m\u001b[04m\u001b[91ml\u001b[39;49;00m\u001b[04m\u001b[91ma\u001b[39;49;00m\u001b[04m\u001b[91mt\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91m/\u001b[39;49;00m\u001b[04m\u001b[91mm\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91md\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91ml\u001b[39;49;00m\u001b[04m\u001b[91m.\u001b[39;49;00m\u001b[04m\u001b[91mj\u001b[39;49;00m\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91mn\u001b[39;49;00m\r\n", + "\u001b[04m\u001b[31;01m-\u001b[39;49;00m\u001b[04m\u001b[31;01m-\u001b[39;49;00m\u001b[04m\u001b[31;01m-\u001b[39;49;00m\r\n", + "\u001b[04m\u001b[31;01m#\u001b[39;49;00m \u001b[04m\u001b[31;01mS\u001b[39;49;00m\u001b[04m\u001b[31;01mo\u001b[39;49;00m\u001b[04m\u001b[31;01mu\u001b[39;49;00m\u001b[04m\u001b[31;01mr\u001b[39;49;00m\u001b[04m\u001b[31;01mc\u001b[39;49;00m\u001b[04m\u001b[31;01me\u001b[39;49;00m\u001b[04m\u001b[31;01m:\u001b[39;49;00m 
\u001b[04m\u001b[31;01ms\u001b[39;49;00m\u001b[04m\u001b[31;01me\u001b[39;49;00m\u001b[04m\u001b[31;01ml\u001b[39;49;00m\u001b[04m\u001b[31;01md\u001b[39;49;00m\u001b[04m\u001b[31;01mo\u001b[39;49;00m\u001b[04m\u001b[31;01mn\u001b[39;49;00m\u001b[04m\u001b[31;01m-\u001b[39;49;00m\u001b[04m\u001b[31;01ms\u001b[39;49;00m\u001b[04m\u001b[31;01mi\u001b[39;49;00m\u001b[04m\u001b[31;01mn\u001b[39;49;00m\u001b[04m\u001b[31;01mg\u001b[39;49;00m\u001b[04m\u001b[31;01ml\u001b[39;49;00m\u001b[04m\u001b[31;01me\u001b[39;49;00m\u001b[04m\u001b[31;01m-\u001b[39;49;00m\u001b[04m\u001b[31;01mm\u001b[39;49;00m\u001b[04m\u001b[31;01mo\u001b[39;49;00m\u001b[04m\u001b[31;01md\u001b[39;49;00m\u001b[04m\u001b[31;01me\u001b[39;49;00m\u001b[04m\u001b[31;01ml\u001b[39;49;00m\u001b[04m\u001b[31;01m/\u001b[39;49;00m\u001b[04m\u001b[31;01mt\u001b[39;49;00m\u001b[04m\u001b[31;01me\u001b[39;49;00m\u001b[04m\u001b[31;01mm\u001b[39;49;00m\u001b[04m\u001b[31;01mp\u001b[39;49;00m\u001b[04m\u001b[31;01ml\u001b[39;49;00m\u001b[04m\u001b[31;01ma\u001b[39;49;00m\u001b[04m\u001b[31;01mt\u001b[39;49;00m\u001b[04m\u001b[31;01me\u001b[39;49;00m\u001b[04m\u001b[31;01ms\u001b[39;49;00m\u001b[04m\u001b[31;01m/\u001b[39;49;00m\u001b[04m\u001b[31;01mm\u001b[39;49;00m\u001b[04m\u001b[31;01mo\u001b[39;49;00m\u001b[04m\u001b[31;01md\u001b[39;49;00m\u001b[04m\u001b[31;01me\u001b[39;49;00m\u001b[04m\u001b[31;01ml\u001b[39;49;00m\u001b[04m\u001b[31;01m.\u001b[39;49;00m\u001b[04m\u001b[31;01mj\u001b[39;49;00m\u001b[04m\u001b[31;01ms\u001b[39;49;00m\u001b[04m\u001b[31;01mo\u001b[39;49;00m\u001b[04m\u001b[31;01mn\u001b[39;49;00m\r\n", "{\r\n", - " \u001b[94m\"apiVersion\"\u001b[39;49;00m: \u001b[33m\"machinelearning.seldon.io/v1alpha2\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"kind\"\u001b[39;49;00m: \u001b[33m\"SeldonDeployment\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"metadata\"\u001b[39;49;00m: {\r\n", - " \u001b[94m\"labels\"\u001b[39;49;00m: 
{\u001b[94m\"app\"\u001b[39;49;00m:\u001b[33m\"seldon\"\u001b[39;49;00m},\r\n", - " \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m\r\n", + " \u001b[34;01m\"apiVersion\"\u001b[39;49;00m: \u001b[04m\u001b[31;01mm\u001b[39;49;00m\u001b[04m\u001b[31;01ma\u001b[39;49;00m\u001b[04m\u001b[31;01mc\u001b[39;49;00m\u001b[04m\u001b[31;01mh\u001b[39;49;00m\u001b[04m\u001b[31;01mi\u001b[39;49;00m\u001b[04m\u001b[31;01mn\u001b[39;49;00m\u001b[04m\u001b[31;01me\u001b[39;49;00m\u001b[04m\u001b[31;01ml\u001b[39;49;00m\u001b[04m\u001b[31;01me\u001b[39;49;00m\u001b[04m\u001b[31;01ma\u001b[39;49;00m\u001b[04m\u001b[31;01mr\u001b[39;49;00m\u001b[04m\u001b[31;01mn\u001b[39;49;00m\u001b[04m\u001b[31;01mi\u001b[39;49;00m\u001b[04m\u001b[31;01mn\u001b[39;49;00m\u001b[04m\u001b[31;01mg\u001b[39;49;00m\u001b[04m\u001b[31;01m.\u001b[39;49;00m\u001b[04m\u001b[31;01ms\u001b[39;49;00m\u001b[04m\u001b[31;01me\u001b[39;49;00m\u001b[04m\u001b[31;01ml\u001b[39;49;00m\u001b[04m\u001b[31;01md\u001b[39;49;00m\u001b[04m\u001b[31;01mo\u001b[39;49;00m\u001b[04m\u001b[31;01mn\u001b[39;49;00m\u001b[04m\u001b[31;01m.\u001b[39;49;00m\u001b[04m\u001b[31;01mi\u001b[39;49;00m\u001b[04m\u001b[31;01mo\u001b[39;49;00m\u001b[04m\u001b[31;01m/\u001b[39;49;00m\u001b[04m\u001b[31;01mv\u001b[39;49;00m\u001b[34m1\u001b[39;49;00m,\r\n", + " \u001b[34;01m\"kind\"\u001b[39;49;00m: \u001b[33m\"SeldonDeployment\"\u001b[39;49;00m,\r\n", + " \u001b[34;01m\"metadata\"\u001b[39;49;00m: {\r\n", + " \u001b[34;01m\"labels\"\u001b[39;49;00m: {\u001b[34;01m\"app\"\u001b[39;49;00m:\u001b[33m\"seldon\"\u001b[39;49;00m},\r\n", + " \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m\r\n", " },\r\n", - " \u001b[94m\"spec\"\u001b[39;49;00m: {\r\n", - " 
\u001b[94m\"annotations\"\u001b[39;49;00m:{\u001b[94m\"seldon.io/engine-log-message-type\"\u001b[39;49;00m:\u001b[33m\"seldon.message.pair\"\u001b[39;49;00m,\u001b[94m\"seldon.io/engine-log-messages-externally\"\u001b[39;49;00m:\u001b[33m\"false\"\u001b[39;49;00m,\u001b[94m\"seldon.io/engine-log-requests\"\u001b[39;49;00m:\u001b[33m\"false\"\u001b[39;49;00m,\u001b[94m\"seldon.io/engine-log-responses\"\u001b[39;49;00m:\u001b[33m\"false\"\u001b[39;49;00m,\u001b[94m\"seldon.io/headless-svc\"\u001b[39;49;00m:\u001b[33m\"false\"\u001b[39;49;00m},\r\n", - " \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"predictors\"\u001b[39;49;00m: [\r\n", + " \u001b[34;01m\"spec\"\u001b[39;49;00m: {\r\n", + " \u001b[34;01m\"annotations\"\u001b[39;49;00m:{\u001b[34;01m\"seldon.io/engine-log-message-type\"\u001b[39;49;00m:\u001b[33m\"seldon.message.pair\"\u001b[39;49;00m,\u001b[34;01m\"seldon.io/engine-log-messages-externally\"\u001b[39;49;00m:\u001b[33m\"false\"\u001b[39;49;00m,\u001b[34;01m\"seldon.io/engine-log-requests\"\u001b[39;49;00m:\u001b[33m\"false\"\u001b[39;49;00m,\u001b[34;01m\"seldon.io/engine-log-responses\"\u001b[39;49;00m:\u001b[33m\"false\"\u001b[39;49;00m,\u001b[34;01m\"seldon.io/headless-svc\"\u001b[39;49;00m:\u001b[33m\"false\"\u001b[39;49;00m},\r\n", + " \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m,\r\n", + " \u001b[34;01m\"predictors\"\u001b[39;49;00m: [\r\n", " {\r\n", - " \u001b[94m\"componentSpecs\"\u001b[39;49;00m: [{\r\n", - " \u001b[94m\"spec\"\u001b[39;49;00m: {\r\n", - " \u001b[94m\"containers\"\u001b[39;49;00m: [\r\n", + " \u001b[34;01m\"componentSpecs\"\u001b[39;49;00m: [{\r\n", + " \u001b[34;01m\"spec\"\u001b[39;49;00m: {\r\n", + " \u001b[34;01m\"containers\"\u001b[39;49;00m: [\r\n", " {\r\n", - " \u001b[94m\"image\"\u001b[39;49;00m: \u001b[33m\"seldonio/mock_classifier:1.0\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"imagePullPolicy\"\u001b[39;49;00m: 
\u001b[33m\"IfNotPresent\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"resources\"\u001b[39;49;00m: {\u001b[94m\"requests\"\u001b[39;49;00m:{\u001b[94m\"memory\"\u001b[39;49;00m:\u001b[33m\"1Mi\"\u001b[39;49;00m}},\r\n", - " \u001b[94m\"env\"\u001b[39;49;00m: [\r\n", + " \u001b[34;01m\"image\"\u001b[39;49;00m: \u001b[33m\"seldonio/mock_classifier_rest:1.3\"\u001b[39;49;00m,\r\n", + " \u001b[34;01m\"imagePullPolicy\"\u001b[39;49;00m: \u001b[33m\"IfNotPresent\"\u001b[39;49;00m,\r\n", + " \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier\"\u001b[39;49;00m,\r\n", + " \u001b[34;01m\"resources\"\u001b[39;49;00m: {\u001b[34;01m\"requests\"\u001b[39;49;00m:{\u001b[34;01m\"memory\"\u001b[39;49;00m:\u001b[33m\"1Mi\"\u001b[39;49;00m}},\r\n", + " \u001b[34;01m\"env\"\u001b[39;49;00m: [\r\n", "{\r\n", - "\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"LOG_LEVEL\"\u001b[39;49;00m,\r\n", - "\u001b[94m\"value\"\u001b[39;49;00m: \u001b[33m\"INFO\"\u001b[39;49;00m\r\n", + "\u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"LOG_LEVEL\"\u001b[39;49;00m,\r\n", + "\u001b[34;01m\"value\"\u001b[39;49;00m: \u001b[33m\"INFO\"\u001b[39;49;00m\r\n", "},\r\n", "]\r\n", " }\r\n", " ],\r\n", - " \u001b[94m\"terminationGracePeriodSeconds\"\u001b[39;49;00m: \u001b[34m1\u001b[39;49;00m\r\n", + " \u001b[34;01m\"terminationGracePeriodSeconds\"\u001b[39;49;00m: \u001b[34m1\u001b[39;49;00m\r\n", " }}\r\n", " ],\r\n", - " \u001b[94m\"graph\"\u001b[39;49;00m:\r\n", + " \u001b[34;01m\"graph\"\u001b[39;49;00m:\r\n", " {\r\n", - " \u001b[94m\"children\"\u001b[39;49;00m: [],\r\n", - " \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"type\"\u001b[39;49;00m: \u001b[33m\"MODEL\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"endpoint\"\u001b[39;49;00m: {\r\n", - " \u001b[94m\"type\"\u001b[39;49;00m: \u001b[33m\"REST\"\u001b[39;49;00m\r\n", + " 
\u001b[34;01m\"children\"\u001b[39;49;00m: [],\r\n", + " \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier\"\u001b[39;49;00m,\r\n", + " \u001b[34;01m\"type\"\u001b[39;49;00m: \u001b[33m\"MODEL\"\u001b[39;49;00m,\r\n", + " \u001b[34;01m\"endpoint\"\u001b[39;49;00m: {\r\n", + " \u001b[34;01m\"type\"\u001b[39;49;00m: \u001b[33m\"REST\"\u001b[39;49;00m\r\n", " }},\r\n", - " \u001b[94m\"svcOrchSpec\"\u001b[39;49;00m: {\r\n", - " \u001b[94m\"resources\"\u001b[39;49;00m: {\u001b[94m\"requests\"\u001b[39;49;00m:{\u001b[94m\"cpu\"\u001b[39;49;00m:\u001b[33m\"0.1\"\u001b[39;49;00m}},\r\n", - "\u001b[94m\"env\"\u001b[39;49;00m: [\r\n", + " \u001b[34;01m\"svcOrchSpec\"\u001b[39;49;00m: {\r\n", + " \u001b[34;01m\"resources\"\u001b[39;49;00m: {\u001b[34;01m\"requests\"\u001b[39;49;00m:{\u001b[34;01m\"cpu\"\u001b[39;49;00m:\u001b[33m\"0.1\"\u001b[39;49;00m}},\r\n", + "\u001b[34;01m\"env\"\u001b[39;49;00m: [\r\n", "]\r\n", " },\r\n", - " \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"replicas\"\u001b[39;49;00m: \u001b[34m1\u001b[39;49;00m,\r\n", - " \u001b[94m\"labels\"\u001b[39;49;00m: {\u001b[94m\"fluentd\"\u001b[39;49;00m:\u001b[33m\"true\"\u001b[39;49;00m,\u001b[94m\"version\"\u001b[39;49;00m:\u001b[33m\"v1\"\u001b[39;49;00m}\r\n", + " \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m,\r\n", + " \u001b[34;01m\"replicas\"\u001b[39;49;00m: \u001b[34m1\u001b[39;49;00m,\r\n", + " \u001b[34;01m\"labels\"\u001b[39;49;00m: {\u001b[34;01m\"fluentd\"\u001b[39;49;00m:\u001b[33m\"true\"\u001b[39;49;00m,\u001b[34;01m\"version\"\u001b[39;49;00m:\u001b[33m\"v1\"\u001b[39;49;00m}\r\n", " }\r\n", " ]\r\n", " }\r\n", @@ -121,7 +147,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 10, "metadata": { "scrolled": true }, @@ -130,13 +156,13 @@ "name": "stdout", "output_type": "stream", "text": [ - "Waiting for deployment \"mymodel-mymodel-7cd068f\" rollout to finish: 0 of 1 
updated replicas are available...\n", - "deployment \"mymodel-mymodel-7cd068f\" successfully rolled out\n" + "Waiting for deployment \"mymodel-mymodel-de240ba\" rollout to finish: 0 of 1 updated replicas are available...\n", + "deployment \"mymodel-mymodel-de240ba\" successfully rolled out\n" ] } ], "source": [ - "!kubectl rollout status deploy/mymodel-mymodel-7cd068f" + "!kubectl rollout status deploy/mymodel-mymodel-de240ba" ] }, { @@ -148,7 +174,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 11, "metadata": { "scrolled": true }, @@ -167,7 +193,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 12, "metadata": { "scrolled": true }, @@ -178,28 +204,25 @@ "text": [ "Success:True message:\n", "Request:\n", + "meta {\n", + "}\n", "data {\n", " tensor {\n", " shape: 1\n", " shape: 1\n", - " values: 0.5530090644075937\n", + " values: 0.6042750134087251\n", " }\n", "}\n", "\n", "Response:\n", "meta {\n", - " puid: \"2ncsc9151419nriqgpug2qj51v\"\n", - " requestPath {\n", - " key: \"classifier\"\n", - " value: \"seldonio/mock_classifier:1.0\"\n", - " }\n", "}\n", "data {\n", " names: \"proba\"\n", " tensor {\n", " shape: 1\n", " shape: 1\n", - " values: 0.08598629818481304\n", + " values: 0.0901018762156463\n", " }\n", "}\n", "\n" @@ -211,54 +234,98 @@ "print(r)" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### gRPC Request" - ] - }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 13, "metadata": { - "scrolled": false + "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Success:True message:\n", - "Request:\n", - "data {\n", - " tensor {\n", - " shape: 1\n", - " shape: 1\n", - " values: 0.4665452623419324\n", - " }\n", - "}\n", - "\n", - "Response:\n", - "meta {\n", - " puid: \"6rvp3tspp2e8ko5nha8op0gjq5\"\n", - " requestPath {\n", - " key: \"classifier\"\n", - " value: \"seldonio/mock_classifier:1.0\"\n", - " }\n", - "}\n", - "data 
{\n", - " names: \"proba\"\n", - " tensor {\n", - " shape: 1\n", - " shape: 1\n", - " values: 0.07942967157377967\n", - " }\n", - "}\n", - "\n" + "release \"mymodel\" uninstalled\r\n" ] } ], + "source": [ + "!helm delete mymodel" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Serve Single GRPC Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "!helm install mymodel ../helm-charts/seldon-single-model --set protocol=GRPC" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "!helm template ../helm-charts/seldon-single-model --set protocol=GRPC | pygmentize -l json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "!kubectl rollout status deploy/mymodel-mymodel-2a00e84" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Get predictions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "from seldon_core.seldon_client import SeldonClient\n", + "sc = SeldonClient(deployment_name=\"mymodel\",namespace=\"seldon\",gateway_endpoint=\"localhost:8003\",gateway=\"ambassador\", debug=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### REST Request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], "source": [ "r = sc.predict(transport=\"grpc\")\n", "print(r)" @@ -266,19 +333,11 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "release \"mymodel\" uninstalled\r\n" - ] - } - ], + "outputs": [], "source": [ "!helm delete mymodel" ] @@ 
-287,159 +346,41 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Serve AB Test" + "## Serve REST AB Test" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "NAME: myabtest\r\n", - "LAST DEPLOYED: Mon Dec 2 12:36:55 2019\r\n", - "NAMESPACE: seldon\r\n", - "STATUS: deployed\r\n", - "REVISION: 1\r\n", - "TEST SUITE: None\r\n" - ] - } - ], + "outputs": [], "source": [ "!helm install myabtest ../helm-charts/seldon-abtest" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[04m\u001b[91m-\u001b[39;49;00m\u001b[04m\u001b[91m-\u001b[39;49;00m\u001b[04m\u001b[91m-\u001b[39;49;00m\r\n", - "\u001b[04m\u001b[91m#\u001b[39;49;00m \u001b[04m\u001b[91mS\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91mu\u001b[39;49;00m\u001b[04m\u001b[91mr\u001b[39;49;00m\u001b[04m\u001b[91mc\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91m:\u001b[39;49;00m 
\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91ml\u001b[39;49;00m\u001b[04m\u001b[91md\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91mn\u001b[39;49;00m\u001b[04m\u001b[91m-\u001b[39;49;00m\u001b[04m\u001b[91ma\u001b[39;49;00m\u001b[04m\u001b[91mb\u001b[39;49;00m\u001b[04m\u001b[91mt\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91mt\u001b[39;49;00m\u001b[04m\u001b[91m/\u001b[39;49;00m\u001b[04m\u001b[91mt\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91mm\u001b[39;49;00m\u001b[04m\u001b[91mp\u001b[39;49;00m\u001b[04m\u001b[91ml\u001b[39;49;00m\u001b[04m\u001b[91ma\u001b[39;49;00m\u001b[04m\u001b[91mt\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91m/\u001b[39;49;00m\u001b[04m\u001b[91ma\u001b[39;49;00m\u001b[04m\u001b[91mb\u001b[39;49;00m\u001b[04m\u001b[91m_\u001b[39;49;00m\u001b[04m\u001b[91mt\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91mt\u001b[39;49;00m\u001b[04m\u001b[91m_\u001b[39;49;00m\u001b[34m2\u001b[39;49;00m\u001b[04m\u001b[91mp\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91md\u001b[39;49;00m\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91m.\u001b[39;49;00m\u001b[04m\u001b[91mj\u001b[39;49;00m\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91mn\u001b[39;49;00m\r\n", - "{\r\n", - " \u001b[94m\"apiVersion\"\u001b[39;49;00m: \u001b[33m\"machinelearning.seldon.io/v1alpha2\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"kind\"\u001b[39;49;00m: \u001b[33m\"SeldonDeployment\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"metadata\"\u001b[39;49;00m: {\r\n", - "\t\u001b[94m\"labels\"\u001b[39;49;00m: {\r\n", - "\t \u001b[94m\"app\"\u001b[39;49;00m: \u001b[33m\"seldon\"\u001b[39;49;00m\r\n", - "\t},\r\n", - 
"\t\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m\r\n", - " },\r\n", - " \u001b[94m\"spec\"\u001b[39;49;00m: {\r\n", - "\t\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m,\r\n", - "\t\u001b[94m\"predictors\"\u001b[39;49;00m: [\r\n", - "\t {\r\n", - "\t\t\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m,\r\n", - "\t\t\u001b[94m\"replicas\"\u001b[39;49;00m: \u001b[34m1\u001b[39;49;00m,\r\n", - "\t\t\u001b[94m\"componentSpecs\"\u001b[39;49;00m: [{\r\n", - "\t\t \u001b[94m\"spec\"\u001b[39;49;00m: {\r\n", - "\t\t\t\u001b[94m\"containers\"\u001b[39;49;00m: [\r\n", - "\t\t\t {\r\n", - "\t\t\t\t\u001b[94m\"image\"\u001b[39;49;00m: \u001b[33m\"seldonio/mock_classifier:1.0\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"imagePullPolicy\"\u001b[39;49;00m: \u001b[33m\"IfNotPresent\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier-1\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"resources\"\u001b[39;49;00m: {\r\n", - "\t\t\t\t \u001b[94m\"requests\"\u001b[39;49;00m: {\r\n", - "\t\t\t\t\t\u001b[94m\"memory\"\u001b[39;49;00m: \u001b[33m\"1Mi\"\u001b[39;49;00m\r\n", - "\t\t\t\t }\r\n", - "\t\t\t\t}\r\n", - "\t\t\t }],\r\n", - "\t\t\t\u001b[94m\"terminationGracePeriodSeconds\"\u001b[39;49;00m: \u001b[34m20\u001b[39;49;00m\r\n", - "\t\t }},\r\n", - "\t {\r\n", - "\t\t \u001b[94m\"metadata\"\u001b[39;49;00m:{\r\n", - "\t\t\t\u001b[94m\"labels\"\u001b[39;49;00m:{\r\n", - "\t\t\t \u001b[94m\"version\"\u001b[39;49;00m:\u001b[33m\"v2\"\u001b[39;49;00m\r\n", - "\t\t\t}\r\n", - "\t\t }, \r\n", - "\t\t\t\u001b[94m\"spec\"\u001b[39;49;00m:{\r\n", - "\t\t\t \u001b[94m\"containers\"\u001b[39;49;00m:[\r\n", - "\t\t\t {\r\n", - "\t\t\t\t\u001b[94m\"image\"\u001b[39;49;00m: \u001b[33m\"seldonio/mock_classifier:1.0\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"imagePullPolicy\"\u001b[39;49;00m: \u001b[33m\"IfNotPresent\"\u001b[39;49;00m,\r\n", - 
"\t\t\t\t\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier-2\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"resources\"\u001b[39;49;00m: {\r\n", - "\t\t\t\t \u001b[94m\"requests\"\u001b[39;49;00m: {\r\n", - "\t\t\t\t\t\u001b[94m\"memory\"\u001b[39;49;00m: \u001b[33m\"1Mi\"\u001b[39;49;00m\r\n", - "\t\t\t\t }\r\n", - "\t\t\t\t}\r\n", - "\t\t\t }\r\n", - "\t\t\t],\r\n", - "\t\t\t\u001b[94m\"terminationGracePeriodSeconds\"\u001b[39;49;00m: \u001b[34m20\u001b[39;49;00m\r\n", - "\t\t\t\t }\r\n", - "\t\t\t\t }],\r\n", - "\t\t\u001b[94m\"graph\"\u001b[39;49;00m: {\r\n", - "\t\t \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m,\r\n", - "\t\t \u001b[94m\"endpoint\"\u001b[39;49;00m:{},\r\n", - "\t\t \u001b[94m\"implementation\"\u001b[39;49;00m:\u001b[33m\"RANDOM_ABTEST\"\u001b[39;49;00m,\r\n", - "\t\t \u001b[94m\"parameters\"\u001b[39;49;00m: [\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[94m\"name\"\u001b[39;49;00m:\u001b[33m\"ratioA\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"value\"\u001b[39;49;00m:\u001b[33m\"0.5\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"type\"\u001b[39;49;00m:\u001b[33m\"FLOAT\"\u001b[39;49;00m\r\n", - "\t\t\t}\r\n", - "\t\t ],\r\n", - "\t\t \u001b[94m\"children\"\u001b[39;49;00m: [\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier-1\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"endpoint\"\u001b[39;49;00m:{\r\n", - "\t\t\t\t\u001b[94m\"type\"\u001b[39;49;00m:\u001b[33m\"REST\"\u001b[39;49;00m\r\n", - "\t\t\t },\r\n", - "\t\t\t \u001b[94m\"type\"\u001b[39;49;00m:\u001b[33m\"MODEL\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"children\"\u001b[39;49;00m:[]\r\n", - "\t\t\t},\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier-2\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"endpoint\"\u001b[39;49;00m:{\r\n", - "\t\t\t\t\u001b[94m\"type\"\u001b[39;49;00m:\u001b[33m\"REST\"\u001b[39;49;00m\r\n", - "\t\t\t },\r\n", - "\t\t\t 
\u001b[94m\"type\"\u001b[39;49;00m:\u001b[33m\"MODEL\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"children\"\u001b[39;49;00m:[]\r\n", - "\t\t\t} \r\n", - "\t\t ]\r\n", - "\t\t}\r\n", - "\t }\r\n", - "\t]\r\n", - " }\r\n", - "}\r\n" - ] - } - ], + "outputs": [], "source": [ "!helm template ../helm-charts/seldon-abtest | pygmentize -l json" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Waiting for deployment \"myabtest-myabtest-41de5b8\" rollout to finish: 0 of 1 updated replicas are available...\n", - "deployment \"myabtest-myabtest-41de5b8\" successfully rolled out\n", - "deployment \"myabtest-myabtest-df66c5c\" successfully rolled out\n" - ] - } - ], + "outputs": [], "source": [ - "!kubectl rollout status deploy/myabtest-myabtest-41de5b8\n", - "!kubectl rollout status deploy/myabtest-myabtest-df66c5c" + "!kubectl rollout status deploy/myabtest-myabtest-0cce7b2\n", + "!kubectl rollout status deploy/myabtest-myabtest-ba661ba" ] }, { @@ -451,7 +392,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": { "scrolled": true }, @@ -470,88 +411,101 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Success:True message:\n", - "Request:\n", - "data {\n", - " tensor {\n", - " shape: 1\n", - " shape: 1\n", - " values: 0.07586815841042194\n", - " }\n", - "}\n", - "\n", - "Response:\n", - "meta {\n", - " puid: \"vbon7q8d5omskl62oephp121es\"\n", - " routing {\n", - " key: \"myabtest\"\n", - " value: 1\n", - " }\n", - " requestPath {\n", - " key: \"classifier-2\"\n", - " value: \"seldonio/mock_classifier:1.0\"\n", - " }\n", - " requestPath {\n", - " key: \"myabtest\"\n", - " value: \"\"\n", - " }\n", - "}\n", - "data {\n", - " names: \"proba\"\n", - 
" tensor {\n", - " shape: 1\n", - " shape: 1\n", - " values: 0.05515890762001978\n", - " }\n", - "}\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "r = sc.predict(transport=\"rest\")\n", "print(r)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "!helm delete myabtest" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "#### gRPC Request" + "## Serve GRPC AB Test" ] }, { "cell_type": "code", - "execution_count": 28, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "ename": "_Rendezvous", - "evalue": "<_Rendezvous of RPC that terminated with:\n\tstatus = StatusCode.UNIMPLEMENTED\n\tdetails = \"\"\n\tdebug_error_string = \"{\"created\":\"@1575290279.803196326\",\"description\":\"Error received from peer ipv6:[::1]:8003\",\"file\":\"src/core/lib/surface/call.cc\",\"file_line\":1055,\"grpc_message\":\"\",\"grpc_status\":12}\"\n>", - "output_type": "error", - "traceback": [ - "\u001b[0;31m--------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31m_Rendezvous\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtransport\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"grpc\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/anaconda3/envs/seldoncore/lib/python3.7/site-packages/seldon_core/seldon_client.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(self, gateway, transport, deployment_name, payload_type, oauth_key, oauth_secret, seldon_rest_endpoint, 
seldon_grpc_endpoint, gateway_endpoint, microservice_endpoint, method, shape, namespace, data, bin_data, str_data, json_data, names, gateway_prefix, headers, http_path)\u001b[0m\n\u001b[1;32m 366\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mrest_predict_gateway\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 367\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"transport\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"grpc\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 368\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mgrpc_predict_gateway\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 369\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 370\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mSeldonClientException\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Unknown transport \"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"transport\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/anaconda3/envs/seldoncore/lib/python3.7/site-packages/seldon_core/seldon_client.py\u001b[0m in \u001b[0;36mgrpc_predict_gateway\u001b[0;34m(deployment_name, namespace, gateway_endpoint, shape, data, headers, payload_type, bin_data, str_data, json_data, grpc_max_send_message_length, grpc_max_receive_message_length, names, call_credentials, channel_credentials, **kwargs)\u001b[0m\n\u001b[1;32m 1852\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mheaders\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1853\u001b[0m \u001b[0mmetadata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheaders\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1854\u001b[0;31m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstub\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mPredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmetadata\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmetadata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1855\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mSeldonClientPrediction\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1856\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/anaconda3/envs/seldoncore/lib/python3.7/site-packages/grpc/_channel.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, request, timeout, metadata, credentials, wait_for_ready, compression)\u001b[0m\n\u001b[1;32m 688\u001b[0m state, call, = self._blocking(request, timeout, metadata, credentials,\n\u001b[1;32m 689\u001b[0m wait_for_ready, compression)\n\u001b[0;32m--> 690\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_end_unary_response_blocking\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcall\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 691\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 692\u001b[0m def with_call(self,\n", - "\u001b[0;32m~/anaconda3/envs/seldoncore/lib/python3.7/site-packages/grpc/_channel.py\u001b[0m in \u001b[0;36m_end_unary_response_blocking\u001b[0;34m(state, call, with_call, deadline)\u001b[0m\n\u001b[1;32m 590\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mstate\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 591\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 592\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0m_Rendezvous\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdeadline\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 593\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 594\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31m_Rendezvous\u001b[0m: <_Rendezvous of RPC that terminated with:\n\tstatus = StatusCode.UNIMPLEMENTED\n\tdetails = \"\"\n\tdebug_error_string = \"{\"created\":\"@1575290279.803196326\",\"description\":\"Error received from peer ipv6:[::1]:8003\",\"file\":\"src/core/lib/surface/call.cc\",\"file_line\":1055,\"grpc_message\":\"\",\"grpc_status\":12}\"\n>" - ] - } - ], + "outputs": [], + "source": [ + "!helm install myabtest ../helm-charts/seldon-abtest --set protocol=GRPC" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "!helm template ../helm-charts/seldon-abtest --set protocol=GRPC | pygmentize -l json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + 
"scrolled": true + }, + "outputs": [], + "source": [ + "!kubectl rollout status deploy/myabtest-myabtest-050eba7\n", + "!kubectl rollout status deploy/myabtest-myabtest-5ffb9f0" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Get predictions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "from seldon_core.seldon_client import SeldonClient\n", + "sc = SeldonClient(deployment_name=\"myabtest\",namespace=\"seldon\",gateway_endpoint=\"localhost:8003\",gateway=\"ambassador\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### REST Request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], "source": [ "r = sc.predict(transport=\"grpc\")\n", "print(r)" @@ -559,19 +513,11 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "release \"myabtest\" uninstalled\r\n" - ] - } - ], + "outputs": [], "source": [ "!helm delete myabtest" ] @@ -580,194 +526,42 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Serve Multi-Armed Bandit" + "## Serve REST Multi-Armed Bandit" ] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "NAME: mymab\r\n", - "LAST DEPLOYED: Fri Nov 29 16:57:34 2019\r\n", - "NAMESPACE: seldon\r\n", - "STATUS: deployed\r\n", - "REVISION: 1\r\n", - "TEST SUITE: None\r\n" - ] - } - ], + "outputs": [], "source": [ "!helm install mymab ../helm-charts/seldon-mab" ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - 
"\u001b[04m\u001b[91m-\u001b[39;49;00m\u001b[04m\u001b[91m-\u001b[39;49;00m\u001b[04m\u001b[91m-\u001b[39;49;00m\r\n", - "\u001b[04m\u001b[91m#\u001b[39;49;00m \u001b[04m\u001b[91mS\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91mu\u001b[39;49;00m\u001b[04m\u001b[91mr\u001b[39;49;00m\u001b[04m\u001b[91mc\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91m:\u001b[39;49;00m \u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91ml\u001b[39;49;00m\u001b[04m\u001b[91md\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91mn\u001b[39;49;00m\u001b[04m\u001b[91m-\u001b[39;49;00m\u001b[04m\u001b[91mm\u001b[39;49;00m\u001b[04m\u001b[91ma\u001b[39;49;00m\u001b[04m\u001b[91mb\u001b[39;49;00m\u001b[04m\u001b[91m/\u001b[39;49;00m\u001b[04m\u001b[91mt\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91mm\u001b[39;49;00m\u001b[04m\u001b[91mp\u001b[39;49;00m\u001b[04m\u001b[91ml\u001b[39;49;00m\u001b[04m\u001b[91ma\u001b[39;49;00m\u001b[04m\u001b[91mt\u001b[39;49;00m\u001b[04m\u001b[91me\u001b[39;49;00m\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91m/\u001b[39;49;00m\u001b[04m\u001b[91mm\u001b[39;49;00m\u001b[04m\u001b[91ma\u001b[39;49;00m\u001b[04m\u001b[91mb\u001b[39;49;00m\u001b[04m\u001b[91m.\u001b[39;49;00m\u001b[04m\u001b[91mj\u001b[39;49;00m\u001b[04m\u001b[91ms\u001b[39;49;00m\u001b[04m\u001b[91mo\u001b[39;49;00m\u001b[04m\u001b[91mn\u001b[39;49;00m\r\n", - "{\r\n", - " \u001b[94m\"apiVersion\"\u001b[39;49;00m: \u001b[33m\"machinelearning.seldon.io/v1alpha2\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"kind\"\u001b[39;49;00m: \u001b[33m\"SeldonDeployment\"\u001b[39;49;00m,\r\n", - " \u001b[94m\"metadata\"\u001b[39;49;00m: {\r\n", - "\t\t\u001b[94m\"labels\"\u001b[39;49;00m: {\u001b[94m\"app\"\u001b[39;49;00m:\u001b[33m\"seldon\"\u001b[39;49;00m},\r\n", - "\t\t\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m\r\n", - " 
},\r\n", - " \u001b[94m\"spec\"\u001b[39;49;00m: {\r\n", - "\t\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"RELEASE-NAME\"\u001b[39;49;00m,\r\n", - "\t\u001b[94m\"predictors\"\u001b[39;49;00m: [\r\n", - "\t {\r\n", - "\t\t\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"mymab\"\u001b[39;49;00m,\r\n", - "\t\t\u001b[94m\"replicas\"\u001b[39;49;00m: \u001b[34m1\u001b[39;49;00m,\r\n", - "\t\t\u001b[94m\"componentSpecs\"\u001b[39;49;00m: [{\r\n", - "\t\t \u001b[94m\"spec\"\u001b[39;49;00m: {\r\n", - "\t\t\t\u001b[94m\"containers\"\u001b[39;49;00m: [\r\n", - "\t\t\t {\r\n", - "\t\t\t\t\u001b[94m\"image\"\u001b[39;49;00m: \u001b[33m\"seldonio/mock_classifier:1.0\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"imagePullPolicy\"\u001b[39;49;00m: \u001b[33m\"IfNotPresent\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier-1\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"resources\"\u001b[39;49;00m: {\r\n", - "\t\t\t\t \u001b[94m\"requests\"\u001b[39;49;00m: {\r\n", - "\t\t\t\t\t\u001b[94m\"memory\"\u001b[39;49;00m: \u001b[33m\"1Mi\"\u001b[39;49;00m\r\n", - "\t\t\t\t }\r\n", - "\t\t\t\t}\r\n", - "\t\t\t }],\r\n", - "\t\t\t\u001b[94m\"terminationGracePeriodSeconds\"\u001b[39;49;00m: \u001b[34m20\u001b[39;49;00m\r\n", - "\t\t }},\r\n", - "\t {\r\n", - "\t\t\t\u001b[94m\"spec\"\u001b[39;49;00m:{\r\n", - "\t\t\t \u001b[94m\"containers\"\u001b[39;49;00m:[\r\n", - "\t\t\t {\r\n", - "\t\t\t\t\u001b[94m\"image\"\u001b[39;49;00m: \u001b[33m\"seldonio/mock_classifier:1.0\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"imagePullPolicy\"\u001b[39;49;00m: \u001b[33m\"IfNotPresent\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier-2\"\u001b[39;49;00m,\r\n", - "\t\t\t\t\u001b[94m\"resources\"\u001b[39;49;00m: {\r\n", - "\t\t\t\t \u001b[94m\"requests\"\u001b[39;49;00m: {\r\n", - "\t\t\t\t\t\u001b[94m\"memory\"\u001b[39;49;00m: \u001b[33m\"1Mi\"\u001b[39;49;00m\r\n", - "\t\t\t\t }\r\n", - "\t\t\t\t}\r\n", 
- "\t\t\t }\r\n", - "\t\t\t],\r\n", - "\t\t\t\u001b[94m\"terminationGracePeriodSeconds\"\u001b[39;49;00m: \u001b[34m20\u001b[39;49;00m\r\n", - "\t\t\t}\r\n", - "\t\t},\r\n", - "\t {\r\n", - "\t\t \u001b[94m\"spec\"\u001b[39;49;00m:{\r\n", - "\t\t\t\u001b[94m\"containers\"\u001b[39;49;00m: [{\r\n", - "\t\t\t \u001b[94m\"image\"\u001b[39;49;00m: \u001b[33m\"seldonio/mab_epsilon_greedy:1.1\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"eg-router\"\u001b[39;49;00m\r\n", - "\t\t\t}],\r\n", - "\t\t\t\u001b[94m\"terminationGracePeriodSeconds\"\u001b[39;49;00m: \u001b[34m20\u001b[39;49;00m\r\n", - "\t\t }}\r\n", - "\t ],\r\n", - "\t\t\u001b[94m\"graph\"\u001b[39;49;00m: {\r\n", - "\t\t \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"eg-router\"\u001b[39;49;00m,\r\n", - "\t\t \u001b[94m\"type\"\u001b[39;49;00m:\u001b[33m\"ROUTER\"\u001b[39;49;00m,\r\n", - "\t\t \u001b[94m\"parameters\"\u001b[39;49;00m: [\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"n_branches\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"value\"\u001b[39;49;00m: \u001b[33m\"2\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"type\"\u001b[39;49;00m: \u001b[33m\"INT\"\u001b[39;49;00m\r\n", - "\t\t\t},\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"epsilon\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"value\"\u001b[39;49;00m: \u001b[33m\"0.2\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"type\"\u001b[39;49;00m: \u001b[33m\"FLOAT\"\u001b[39;49;00m\r\n", - "\t\t\t},\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"verbose\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"value\"\u001b[39;49;00m: \u001b[33m\"1\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"type\"\u001b[39;49;00m: \u001b[33m\"BOOL\"\u001b[39;49;00m\r\n", - "\t\t\t}\r\n", - "\t\t ],\r\n", - "\t\t \u001b[94m\"children\"\u001b[39;49;00m: [\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[94m\"name\"\u001b[39;49;00m: 
\u001b[33m\"classifier-1\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"endpoint\"\u001b[39;49;00m:{\r\n", - "\t\t\t\t\u001b[94m\"type\"\u001b[39;49;00m:\u001b[33m\"REST\"\u001b[39;49;00m\r\n", - "\t\t\t },\r\n", - "\t\t\t \u001b[94m\"type\"\u001b[39;49;00m:\u001b[33m\"MODEL\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"children\"\u001b[39;49;00m:[]\r\n", - "\t\t\t},\r\n", - "\t\t\t{\r\n", - "\t\t\t \u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier-2\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"endpoint\"\u001b[39;49;00m:{\r\n", - "\t\t\t\t\u001b[94m\"type\"\u001b[39;49;00m:\u001b[33m\"REST\"\u001b[39;49;00m\r\n", - "\t\t\t },\r\n", - "\t\t\t \u001b[94m\"type\"\u001b[39;49;00m:\u001b[33m\"MODEL\"\u001b[39;49;00m,\r\n", - "\t\t\t \u001b[94m\"children\"\u001b[39;49;00m:[]\r\n", - "\t\t\t} \r\n", - "\t\t ]\r\n", - "\t\t},\r\n", - "\t\t\u001b[94m\"svcOrchSpec\"\u001b[39;49;00m: {\r\n", - "\t\t\u001b[94m\"resources\"\u001b[39;49;00m: {\u001b[94m\"requests\"\u001b[39;49;00m:{\u001b[94m\"cpu\"\u001b[39;49;00m:\u001b[33m\"0.1\"\u001b[39;49;00m}},\r\n", - "\u001b[94m\"env\"\u001b[39;49;00m: [\r\n", - "{\r\n", - "\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"SELDON_LOG_MESSAGES_EXTERNALLY\"\u001b[39;49;00m,\r\n", - "\u001b[94m\"value\"\u001b[39;49;00m: \u001b[33m\"false\"\u001b[39;49;00m\r\n", - "},\r\n", - "{\r\n", - "\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"SELDON_LOG_MESSAGE_TYPE\"\u001b[39;49;00m,\r\n", - "\u001b[94m\"value\"\u001b[39;49;00m: \u001b[33m\"seldon.message.pair\"\u001b[39;49;00m\r\n", - "},\r\n", - "{\r\n", - "\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"SELDON_LOG_REQUESTS\"\u001b[39;49;00m,\r\n", - "\u001b[94m\"value\"\u001b[39;49;00m: \u001b[33m\"false\"\u001b[39;49;00m\r\n", - "},\r\n", - "{\r\n", - "\u001b[94m\"name\"\u001b[39;49;00m: \u001b[33m\"SELDON_LOG_RESPONSES\"\u001b[39;49;00m,\r\n", - "\u001b[94m\"value\"\u001b[39;49;00m: \u001b[33m\"false\"\u001b[39;49;00m\r\n", - "},\r\n", - "]\r\n", - "},\r\n", - 
"\t\t\u001b[94m\"labels\"\u001b[39;49;00m: {\u001b[94m\"fluentd\"\u001b[39;49;00m:\u001b[33m\"true\"\u001b[39;49;00m,\u001b[94m\"version\"\u001b[39;49;00m:\u001b[33m\"v1\"\u001b[39;49;00m}\r\n", - "\t }\r\n", - "\t]\r\n", - " }\r\n", - "}\r\n" - ] - } - ], + "outputs": [], "source": [ "!helm template ../helm-charts/seldon-mab | pygmentize -l json" ] }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Waiting for deployment \"mymab-mymab-41de5b8\" rollout to finish: 0 of 1 updated replicas are available...\n", - "deployment \"mymab-mymab-41de5b8\" successfully rolled out\n", - "deployment \"mymab-mymab-b8038b2\" successfully rolled out\n", - "deployment \"mymab-mymab-df66c5c\" successfully rolled out\n" - ] - } - ], + "outputs": [], "source": [ - "!kubectl rollout status deploy/mymab-mymab-41de5b8\n", + "!kubectl rollout status deploy/mymab-mymab-0cce7b2\n", "!kubectl rollout status deploy/mymab-mymab-b8038b2\n", - "!kubectl rollout status deploy/mymab-mymab-df66c5c " + "!kubectl rollout status deploy/mymab-mymab-ba661ba " ] }, { @@ -779,7 +573,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "metadata": { "scrolled": true }, @@ -798,114 +592,102 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Success:True message:\n", - "Request:\n", - "data {\n", - " tensor {\n", - " shape: 1\n", - " shape: 1\n", - " values: 0.44362253786506456\n", - " }\n", - "}\n", - "\n", - "Response:\n", - "meta {\n", - " puid: \"vcitd8rqtq3jo3pi8btsp09o4e\"\n", - " routing {\n", - " key: \"eg-router\"\n", - " value: 0\n", - " }\n", - " requestPath {\n", - " key: \"classifier-1\"\n", - " value: \"seldonio/mock_classifier:1.0\"\n", - " }\n", - " requestPath {\n", - " key: 
\"eg-router\"\n", - " value: \"seldonio/mab_epsilon_greedy:1.1\"\n", - " }\n", - "}\n", - "data {\n", - " names: \"proba\"\n", - " tensor {\n", - " shape: 1\n", - " shape: 1\n", - " values: 0.07776962479808773\n", - " }\n", - "}\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "r = sc.predict(transport=\"rest\")\n", "print(r)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "!helm delete mymab" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "#### gRPC Request" + "## Serve GRPC Multi-Armed Bandit" ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "!helm install mymab ../helm-charts/seldon-mab --set protocol=GRPC" + ] + }, + { + "cell_type": "code", + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Success:True message:\n", - "Request:\n", - "data {\n", - " tensor {\n", - " shape: 1\n", - " shape: 1\n", - " values: 0.26333382689168205\n", - " }\n", - "}\n", - "\n", - "Response:\n", - "meta {\n", - " puid: \"7fsq1k67apa006u3he5ukm7eo2\"\n", - " routing {\n", - " key: \"eg-router\"\n", - " value: 1\n", - " }\n", - " requestPath {\n", - " key: \"classifier-2\"\n", - " value: \"seldonio/mock_classifier:1.0\"\n", - " }\n", - " requestPath {\n", - " key: \"eg-router\"\n", - " value: \"seldonio/mab_epsilon_greedy:1.1\"\n", - " }\n", - "}\n", - "data {\n", - " names: \"proba\"\n", - " tensor {\n", - " shape: 1\n", - " shape: 1\n", - " values: 0.06578388860602263\n", - " }\n", - "}\n", - "\n" - ] - } - ], + "outputs": [], + "source": [ + "!helm template ../helm-charts/seldon-mab --set protocol=GRPC | pygmentize -l json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "!kubectl rollout status 
deploy/mymab-mymab-050eba7\n", + "!kubectl rollout status deploy/mymab-mymab-5f10a24\n", + "!kubectl rollout status deploy/mymab-mymab-5ffb9f0 " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Get predictions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "from seldon_core.seldon_client import SeldonClient\n", + "sc = SeldonClient(deployment_name=\"mymab\",namespace=\"seldon\",gateway_endpoint=\"localhost:8003\",gateway=\"ambassador\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### REST Request" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], "source": [ "r = sc.predict(transport=\"grpc\")\n", "print(r)" @@ -913,22 +695,21 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "release \"mymab\" uninstalled\r\n" - ] - } - ], + "outputs": [], "source": [ "!helm delete mymab" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -948,7 +729,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.5" + "version": "3.6.8" }, "varInspector": { "cols": { diff --git a/notebooks/protocol_examples.ipynb b/notebooks/protocol_examples.ipynb new file mode 100644 index 0000000000..a5cb8dd0f4 --- /dev/null +++ b/notebooks/protocol_examples.ipynb @@ -0,0 +1,279 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Basic Examples with Different Protocols\n", + "\n", + "## Prerequisites\n", + "\n", + " * A kubernetes cluster with kubectl configured\n", + " * curl\n", + " * grpcurl\n", + " * pygmentize\n", + " \n", + "\n", + "## Setup Seldon Core\n", + "\n", + "Use 
the setup notebook to [Setup Cluster](seldon_core_setup.ipynb) to setup Seldon Core with an ingress - either Ambassador or Istio.\n", + "\n", + "Then port-forward to that ingress on localhost:8003 in a separate terminal either with:\n", + "\n", + " * Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080`\n", + " * Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Seldon Protocol REST Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize resources/model_seldon_rest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f resources/model_seldon_rest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=restseldon-rest-seldon \\\n", + " -o jsonpath='{.items[0].metadata.name}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!curl -v -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' \\\n", + " -X POST http://localhost:8003/seldon/seldon/rest-seldon/api/v1.0/predictions \\\n", + " -H \"Content-Type: application/json\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f resources/model_seldon_rest.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Seldon Protocol GRPC Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize 
resources/model_seldon_grpc.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f resources/model_seldon_grpc.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=grpcseldon-grpc-seldon \\\n", + " -o jsonpath='{.items[0].metadata.name}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!cd ../executor/proto && grpcurl -d '{\"data\":{\"ndarray\":[[1.0,2.0]]}}' \\\n", + " -rpc-header seldon:grpc-seldon -rpc-header namespace:seldon \\\n", + " -plaintext \\\n", + " -proto ./prediction.proto 0.0.0.0:8003 seldon.protos.Seldon/Predict" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f resources/model_seldon_grpc.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Tensorflow Protocol REST Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize resources/model_tfserving_rest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f resources/model_tfserving_rest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=resttfserving-rest-tfserving \\\n", + " -o jsonpath='{.items[0].metadata.name}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!curl -d '{\"instances\": [1.0, 2.0, 5.0]}' \\\n", + " -X POST http://localhost:8003/seldon/seldon/rest-tfserving/v1/models/halfplustwo/:predict \\\n", + " -H 
\"Content-Type: application/json\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f resources/model_tfserving_rest.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Tensorflow Protocol GRPC Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pygmentize resources/model_tfserving_grpc.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl apply -f resources/model_tfserving_grpc.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=grpctfserving-grpc-tfserving \\\n", + " -o jsonpath='{.items[0].metadata.name}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!cd ../executor/proto && grpcurl \\\n", + " -d '{\"model_spec\":{\"name\":\"halfplustwo\"},\"inputs\":{\"x\":{\"dtype\": 1, \"tensor_shape\": {\"dim\":[{\"size\": 3}]}, \"floatVal\" : [1.0, 2.0, 3.0]}}}' \\\n", + " -rpc-header seldon:grpc-tfserving -rpc-header namespace:seldon \\\n", + " -plaintext -proto ./prediction_service.proto \\\n", + " 0.0.0.0:8003 tensorflow.serving.PredictionService/Predict" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f resources/model_tfserving_grpc.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + 
"name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/resources/model.yaml b/notebooks/resources/model.yaml index c6c91e1d84..9bbb6f64e0 100644 --- a/notebooks/resources/model.yaml +++ b/notebooks/resources/model.yaml @@ -8,7 +8,7 @@ spec: - componentSpecs: - spec: containers: - - image: seldonio/mock_classifier:1.0 + - image: seldonio/mock_classifier_rest:1.3 name: classifier graph: children: [] diff --git a/notebooks/resources/model_seldon_grpc.yaml b/notebooks/resources/model_seldon_grpc.yaml new file mode 100644 index 0000000000..49b56459cc --- /dev/null +++ b/notebooks/resources/model_seldon_grpc.yaml @@ -0,0 +1,21 @@ +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + name: grpc-seldon +spec: + name: grpcseldon + predictors: + - protocol: seldon + transport: grpc + componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier_grpc:1.3 + name: classifier + graph: + name: classifier + type: MODEL + endpoint: + type: GRPC + name: model + replicas: 1 diff --git a/notebooks/resources/model_seldon_rest.yaml b/notebooks/resources/model_seldon_rest.yaml new file mode 100644 index 0000000000..f00f0d07f8 --- /dev/null +++ b/notebooks/resources/model_seldon_rest.yaml @@ -0,0 +1,19 @@ +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + name: rest-seldon +spec: + name: restseldon + predictors: + - protocol: seldon + transport: rest + componentSpecs: + - spec: + containers: + - image: seldonio/mock_classifier_rest:1.3 + name: classifier + graph: + name: classifier + type: MODEL + name: model + replicas: 1 diff --git a/notebooks/resources/model_tfserving_grpc.yaml b/notebooks/resources/model_tfserving_grpc.yaml new file mode 100644 index 0000000000..026217ad5f --- /dev/null +++ b/notebooks/resources/model_tfserving_grpc.yaml @@ -0,0 +1,30 @@ +apiVersion: machinelearning.seldon.io/v1 
+kind: SeldonDeployment +metadata: + name: grpc-tfserving +spec: + name: grpctfserving + predictors: + - protocol: tensorflow + transport: grpc + componentSpecs: + - spec: + containers: + - args: + - --port=8500 + - --rest_api_port=8501 + - --model_name=halfplustwo + - --model_base_path=gs://seldon-models/tfserving/half_plus_two + image: tensorflow/serving + name: halfplustwo + ports: + - containerPort: 8500 + name: grpc + graph: + name: halfplustwo + type: MODEL + endpoint: + service_port: 8500 + type: GRPC + name: model + replicas: 1 diff --git a/notebooks/resources/model_tfserving_rest.yaml b/notebooks/resources/model_tfserving_rest.yaml new file mode 100644 index 0000000000..4d7b6e7df5 --- /dev/null +++ b/notebooks/resources/model_tfserving_rest.yaml @@ -0,0 +1,29 @@ +apiVersion: machinelearning.seldon.io/v1 +kind: SeldonDeployment +metadata: + name: rest-tfserving +spec: + name: resttfserving + predictors: + - protocol: tensorflow + transport: rest + componentSpecs: + - spec: + containers: + - args: + - --port=8500 + - --rest_api_port=8501 + - --model_name=halfplustwo + - --model_base_path=gs://seldon-models/tfserving/half_plus_two + image: tensorflow/serving + name: halfplustwo + ports: + - containerPort: 8501 + name: http + graph: + name: halfplustwo + type: MODEL + endpoint: + service_port: 8501 + name: model + replicas: 1 diff --git a/notebooks/seldon_core_setup.ipynb b/notebooks/seldon_core_setup.ipynb index 03d1e88d36..34985556b0 100644 --- a/notebooks/seldon_core_setup.ipynb +++ b/notebooks/seldon_core_setup.ipynb @@ -29,20 +29,36 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "namespace/seldon created\r\n" + ] + } + ], "source": [ "!kubectl create namespace seldon" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + 
{ + "name": "stdout", + "output_type": "stream", + "text": [ + "Context \"kind-kind\" modified.\r\n" + ] + } + ], "source": [ "!kubectl config set-context $(kubectl config current-context) --namespace=seldon" ] @@ -135,27 +151,88 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\"stable\" has been added to your repositories\r\n" + ] + } + ], "source": [ "!helm repo add stable https://kubernetes-charts.storage.googleapis.com/" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hang tight while we grab the latest from your chart repositories...\n", + "...Successfully got an update from the \"seldon-staging\" chart repository\n", + "...Successfully got an update from the \"stable\" chart repository\n", + "Update Complete. 
โŽˆ Happy Helming!โŽˆ \n" + ] + } + ], "source": [ "!helm repo update" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "manifest_sorter.go:175: info: skipping unknown hook: \"crd-install\"\n", + "NAME: ambassador\n", + "LAST DEPLOYED: Sat Jan 4 10:11:16 2020\n", + "NAMESPACE: seldon\n", + "STATUS: deployed\n", + "REVISION: 1\n", + "NOTES:\n", + "Congratulations! 
You've successfully installed Ambassador.\n", + "\n", + "For help, visit our Slack at https://d6e.co/slack or view the documentation online at https://www.getambassador.io.\n", + "\n", + "To get the IP address of Ambassador, run the following commands:\n", + "NOTE: It may take a few minutes for the LoadBalancer IP to be available.\n", + " You can watch the status of by running 'kubectl get svc -w --namespace seldon ambassador'\n", + "\n", + " On GKE/Azure:\n", + " export SERVICE_IP=$(kubectl get svc --namespace seldon ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}')\n", + "\n", + " On AWS:\n", + " export SERVICE_IP=$(kubectl get svc --namespace seldon ambassador -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')\n", + "\n", + " echo http://$SERVICE_IP:\n" + ] + } + ], "source": [ "!helm install ambassador stable/ambassador --set crds.keep=false" ] @@ -169,9 +246,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Waiting for deployment \"ambassador\" rollout to finish: 0 of 3 updated replicas are available...\n", + "Waiting for deployment \"ambassador\" rollout to finish: 1 of 3 updated replicas are available...\n", + "Waiting for deployment \"ambassador\" rollout to finish: 2 of 3 updated replicas are available...\n", + "deployment \"ambassador\" successfully rolled out\n" + ] + } + ], "source": [ "!kubectl rollout status deployment.apps/ambassador" ] @@ -180,7 +268,22 @@ "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Forwarding from 127.0.0.1:8003 -> 8080\n", + "Forwarding from [::1]:8003 -> 8080\n", + "Handling connection for 8003\n", + "Handling connection for 8003\n", + "Handling connection for 8003\n", + "Handling connection for 8003\n", + "Handling connection for 8003\n", + "Handling 
connection for 8003\n" + ] + } + ], "source": [ "!kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080" ] @@ -215,7 +318,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.5" + "version": "3.6.8" } }, "nbformat": 4, diff --git a/notebooks/server_examples.ipynb b/notebooks/server_examples.ipynb index c7e8175450..771f3f410e 100644 --- a/notebooks/server_examples.ipynb +++ b/notebooks/server_examples.ipynb @@ -25,82 +25,40 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[94mapiVersion\u001b[39;49;00m: machinelearning.seldon.io/v1alpha2\r\n", - "\u001b[94mkind\u001b[39;49;00m: SeldonDeployment\r\n", - "\u001b[94mmetadata\u001b[39;49;00m:\r\n", - " \u001b[94mname\u001b[39;49;00m: sklearn\r\n", - "\u001b[94mspec\u001b[39;49;00m:\r\n", - " \u001b[94mname\u001b[39;49;00m: iris\r\n", - " \u001b[94mpredictors\u001b[39;49;00m:\r\n", - " - \u001b[94mgraph\u001b[39;49;00m:\r\n", - " \u001b[94mchildren\u001b[39;49;00m: []\r\n", - " \u001b[94mimplementation\u001b[39;49;00m: SKLEARN_SERVER\r\n", - " \u001b[94mmodelUri\u001b[39;49;00m: gs://seldon-models/sklearn/iris\r\n", - " \u001b[94mname\u001b[39;49;00m: classifier\r\n", - " \u001b[94mname\u001b[39;49;00m: default\r\n", - " \u001b[94mreplicas\u001b[39;49;00m: 1\r\n", - " \u001b[94msvcOrchSpec\u001b[39;49;00m: \r\n", - " \u001b[94menv\u001b[39;49;00m: \r\n", - " - \u001b[94mname\u001b[39;49;00m: SELDON_LOG_LEVEL\r\n", - " \u001b[94mvalue\u001b[39;49;00m: DEBUG\r\n" - ] - } - ], + "outputs": [], "source": [ "!pygmentize ../servers/sklearnserver/samples/iris.yaml" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": 
"stream", - "text": [ - "seldondeployment.machinelearning.seldon.io/sklearn created\r\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl apply -f ../servers/sklearnserver/samples/iris.yaml" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Waiting for deployment \"iris-default-4903e3c\" rollout to finish: 0 of 1 updated replicas are available...\n", - "deployment \"iris-default-4903e3c\" successfully rolled out\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl rollout status deploy/iris-default-4903e3c" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "scrolled": false }, @@ -112,52 +70,11 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Success:True message:\n", - "Request:\n", - "data {\n", - " tensor {\n", - " shape: 1\n", - " shape: 4\n", - " values: 0.5532809728679123\n", - " values: 0.4342251063258179\n", - " values: 0.06149633665408394\n", - " values: 0.4239944325524071\n", - " }\n", - "}\n", - "\n", - "Response:\n", - "meta {\n", - " puid: \"vf7s9q31lojvtoou0s593o2amq\"\n", - " requestPath {\n", - " key: \"classifier\"\n", - " value: \"seldonio/sklearnserver_rest:0.2\"\n", - " }\n", - "}\n", - "data {\n", - " names: \"t:0\"\n", - " names: \"t:1\"\n", - " names: \"t:2\"\n", - " tensor {\n", - " shape: 1\n", - " shape: 3\n", - " values: 0.48054487796869\n", - " values: 0.39136042893422457\n", - " values: 0.12809469309708543\n", - " }\n", - "}\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "r = sc.predict(gateway=\"ambassador\",transport=\"rest\",shape=(1,4))\n", "print(r)" @@ -165,19 +82,11 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": 
[ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io \"sklearn\" deleted\r\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl delete -f ../servers/sklearnserver/samples/iris.yaml" ] @@ -191,78 +100,40 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[94mapiVersion\u001b[39;49;00m: machinelearning.seldon.io/v1alpha2\r\n", - "\u001b[94mkind\u001b[39;49;00m: SeldonDeployment\r\n", - "\u001b[94mmetadata\u001b[39;49;00m:\r\n", - " \u001b[94mname\u001b[39;49;00m: xgboost\r\n", - "\u001b[94mspec\u001b[39;49;00m:\r\n", - " \u001b[94mname\u001b[39;49;00m: iris\r\n", - " \u001b[94mpredictors\u001b[39;49;00m:\r\n", - " - \u001b[94mgraph\u001b[39;49;00m:\r\n", - " \u001b[94mchildren\u001b[39;49;00m: []\r\n", - " \u001b[94mimplementation\u001b[39;49;00m: XGBOOST_SERVER\r\n", - " \u001b[94mmodelUri\u001b[39;49;00m: gs://seldon-models/xgboost/iris\r\n", - " \u001b[94mname\u001b[39;49;00m: classifier\r\n", - " \u001b[94mname\u001b[39;49;00m: default\r\n", - " \u001b[94mreplicas\u001b[39;49;00m: 1\r\n" - ] - } - ], + "outputs": [], "source": [ "!pygmentize ../servers/xgboostserver/samples/iris.yaml" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io/xgboost created\r\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl apply -f ../servers/xgboostserver/samples/iris.yaml" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Waiting for deployment \"iris-default-af1783b\" rollout to finish: 0 of 1 updated replicas are available...\n", - "deployment 
\"iris-default-af1783b\" successfully rolled out\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl rollout status deploy/iris-default-af1783b" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": { "scrolled": false }, @@ -274,46 +145,11 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Success:True message:\n", - "Request:\n", - "data {\n", - " tensor {\n", - " shape: 1\n", - " shape: 4\n", - " values: 0.8515452360370743\n", - " values: 0.5935107411486708\n", - " values: 0.5544182060177882\n", - " values: 0.9355298659457035\n", - " }\n", - "}\n", - "\n", - "Response:\n", - "meta {\n", - " puid: \"u1vhv4n378m1ai5uu5iavp5b53\"\n", - " requestPath {\n", - " key: \"classifier\"\n", - " value: \"seldonio/xgboostserver_rest:0.2\"\n", - " }\n", - "}\n", - "data {\n", - " tensor {\n", - " shape: 1\n", - " values: 0.0\n", - " }\n", - "}\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "r = sc.predict(gateway=\"ambassador\",transport=\"rest\",shape=(1,4))\n", "print(r)" @@ -321,19 +157,11 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io \"xgboost\" deleted\r\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl delete -f ../servers/xgboostserver/samples/iris.yaml" ] @@ -349,84 +177,40 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[94mapiVersion\u001b[39;49;00m: machinelearning.seldon.io/v1alpha2\r\n", - "\u001b[94mkind\u001b[39;49;00m: SeldonDeployment\r\n", - "\u001b[94mmetadata\u001b[39;49;00m:\r\n", - " \u001b[94mname\u001b[39;49;00m: 
tfserving\r\n", - "\u001b[94mspec\u001b[39;49;00m:\r\n", - " \u001b[94mname\u001b[39;49;00m: mnist\r\n", - " \u001b[94mpredictors\u001b[39;49;00m:\r\n", - " - \u001b[94mgraph\u001b[39;49;00m:\r\n", - " \u001b[94mchildren\u001b[39;49;00m: []\r\n", - " \u001b[94mimplementation\u001b[39;49;00m: TENSORFLOW_SERVER\r\n", - " \u001b[94mmodelUri\u001b[39;49;00m: gs://seldon-models/tfserving/mnist-model\r\n", - " \u001b[94mname\u001b[39;49;00m: mnist-model\r\n", - " \u001b[94mparameters\u001b[39;49;00m:\r\n", - " - \u001b[94mname\u001b[39;49;00m: signature_name\r\n", - " \u001b[94mtype\u001b[39;49;00m: STRING\r\n", - " \u001b[94mvalue\u001b[39;49;00m: predict_images\r\n", - " - \u001b[94mname\u001b[39;49;00m: model_name\r\n", - " \u001b[94mtype\u001b[39;49;00m: STRING\r\n", - " \u001b[94mvalue\u001b[39;49;00m: mnist-model\r\n", - " \u001b[94mname\u001b[39;49;00m: default\r\n", - " \u001b[94mreplicas\u001b[39;49;00m: 1\r\n" - ] - } - ], + "outputs": [], "source": [ "!pygmentize ../servers/tfserving/samples/mnist_rest.yaml" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": { - "scrolled": false + "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io/tfserving created\r\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl apply -f ../servers/tfserving/samples/mnist_rest.yaml" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "deployment \"mnist-default-725903e\" successfully rolled out\r\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl rollout status deploy/mnist-default-725903e" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": { "scrolled": false }, @@ -438,846 +222,11 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, 
"metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Success:True message:\n", - "Request:\n", - "data {\n", - " tensor {\n", - " shape: 1\n", - " shape: 784\n", - " values: 0.9077521075865232\n", - " values: 0.481494282768426\n", - " values: 0.4326137508614173\n", - " values: 0.39412542521520766\n", - " values: 0.2540870260101048\n", - " values: 0.7407421531831561\n", - " values: 0.9075359591452956\n", - " values: 0.522853882249715\n", - " values: 0.1544018196830882\n", - " values: 0.2037192050420561\n", - " values: 0.677923447833067\n", - " values: 0.7171500065299637\n", - " values: 0.08749274899503934\n", - " values: 0.7355149948410137\n", - " values: 0.1874174160636044\n", - " values: 0.2909706448666053\n", - " values: 0.3418987032626096\n", - " values: 0.3329337836373806\n", - " values: 0.8434577191142036\n", - " values: 0.4861160253821838\n", - " values: 0.8318209015954431\n", - " values: 0.8976272369790321\n", - " values: 0.9583714018073152\n", - " values: 0.9528392802914424\n", - " values: 0.9071597834656283\n", - " values: 0.9753120098510758\n", - " values: 0.5555712906817557\n", - " values: 0.5346651649789239\n", - " values: 0.09851330244536882\n", - " values: 0.6664706250513927\n", - " values: 0.11403634279172392\n", - " values: 0.5374185329121973\n", - " values: 0.18945161529089793\n", - " values: 0.6047580954105439\n", - " values: 0.8162030686962867\n", - " values: 0.9506767562539185\n", - " values: 0.9479851932512139\n", - " values: 0.12115021686133898\n", - " values: 0.8199392947713652\n", - " values: 0.13879134888490574\n", - " values: 0.43217727107073\n", - " values: 0.19385309094196357\n", - " values: 0.7002771019322341\n", - " values: 0.7362057065470426\n", - " values: 0.2401256998970288\n", - " values: 0.5685737496475256\n", - " values: 0.8748519269774152\n", - " values: 0.2678023087228508\n", - " values: 0.690197466066959\n", - " values: 0.2965633009557601\n", - " values: 
0.9853731340532961\n", - " values: 0.5165854031552501\n", - " values: 0.04164511986589636\n", - " values: 0.03459266113483839\n", - " values: 0.875121368994282\n", - " values: 0.946183989304959\n", - " values: 0.7171059548383112\n", - " values: 0.711139525482736\n", - " values: 0.30515788550644796\n", - " values: 0.2108383869427254\n", - " values: 0.2599631409136779\n", - " values: 0.5887264049831238\n", - " values: 0.22017436889596642\n", - " values: 0.33320343029581845\n", - " values: 0.25721323450998834\n", - " values: 0.4083415062751512\n", - " values: 0.8824740663798997\n", - " values: 0.12964814075689712\n", - " values: 0.8672517641036215\n", - " values: 0.49342776596156535\n", - " values: 0.9084096121698604\n", - " values: 0.9956901614042943\n", - " values: 0.3207215015126982\n", - " values: 0.05069254546859181\n", - " values: 0.12175435869855633\n", - " values: 0.7167431811866363\n", - " values: 0.9375471760681595\n", - " values: 0.8258359222213172\n", - " values: 0.2537974308391957\n", - " values: 0.19063929210385766\n", - " values: 0.6253681785529693\n", - " values: 0.9979604315357523\n", - " values: 0.4105176735695927\n", - " values: 0.3168838106627464\n", - " values: 0.8764984367441401\n", - " values: 0.07297344708408238\n", - " values: 0.9034945957649023\n", - " values: 0.5174492705934637\n", - " values: 0.7454240842393215\n", - " values: 0.32454856598378745\n", - " values: 0.8519939059996512\n", - " values: 0.3655353846884615\n", - " values: 0.40537857170813263\n", - " values: 0.01877979169503452\n", - " values: 0.3541760262517297\n", - " values: 0.07949778044507283\n", - " values: 0.3269478297162596\n", - " values: 0.40523941216654347\n", - " values: 0.4201673328416822\n", - " values: 0.5959686959374104\n", - " values: 0.5461462127643125\n", - " values: 0.0588317336886236\n", - " values: 0.6470341112798876\n", - " values: 0.25346133216135736\n", - " values: 0.4133806215145325\n", - " values: 0.2189185289771124\n", - " values: 0.4389444649646417\n", - 
" values: 0.0314203034627637\n", - " values: 0.7443055340616734\n", - " values: 0.9306987952928957\n", - " values: 0.13495678960560764\n", - " values: 0.06421233011888672\n", - " values: 0.8056759541449129\n", - " values: 0.29314131993308756\n", - " values: 0.9817870038593661\n", - " values: 0.20321266016657136\n", - " values: 0.28126732398512777\n", - " values: 0.5404936255701115\n", - " values: 0.3313533969876933\n", - " values: 0.6463503164520762\n", - " values: 0.4110563299094897\n", - " values: 0.6674592262396479\n", - " values: 0.03352855311467773\n", - " values: 0.9853500436804834\n", - " values: 0.39773794546619234\n", - " values: 0.48157986398866026\n", - " values: 0.08838291345155513\n", - " values: 0.5601097343340652\n", - " values: 0.5170461821964503\n", - " values: 0.3022986200964791\n", - " values: 0.8441608746573529\n", - " values: 0.45890472049955633\n", - " values: 0.9668260645550384\n", - " values: 0.2734664010672849\n", - " values: 0.40905452140433896\n", - " values: 0.20139966034626966\n", - " values: 0.31069816904126624\n", - " values: 0.3141074534113779\n", - " values: 0.5383495685457862\n", - " values: 0.5878362437577416\n", - " values: 0.900632825915421\n", - " values: 0.7517247060103933\n", - " values: 0.7849775075906724\n", - " values: 0.11073609303298759\n", - " values: 0.4708813063727071\n", - " values: 0.8736904922415315\n", - " values: 0.613497143031826\n", - " values: 0.7564955281171043\n", - " values: 0.6538857019937125\n", - " values: 0.5710742882240493\n", - " values: 0.09176048183490748\n", - " values: 0.7180532555674387\n", - " values: 0.7137897948952836\n", - " values: 0.913422170946152\n", - " values: 0.8870195411800623\n", - " values: 0.45942627046411\n", - " values: 0.15888959267291125\n", - " values: 0.525818819911325\n", - " values: 0.1669267414166814\n", - " values: 0.7090522193887324\n", - " values: 0.6999960465014148\n", - " values: 0.3337425697108397\n", - " values: 0.8446054672218825\n", - " values: 
0.3384339313798286\n", - " values: 0.0129466070074864\n", - " values: 0.9815696159997467\n", - " values: 0.9385564997287447\n", - " values: 0.055657067560179074\n", - " values: 0.3059768520716226\n", - " values: 0.845921643767685\n", - " values: 0.48531571716505706\n", - " values: 0.34573942505697863\n", - " values: 0.2659489006384149\n", - " values: 0.5834312807801726\n", - " values: 0.6996937710016885\n", - " values: 0.585104606969596\n", - " values: 0.905813339276629\n", - " values: 0.9093475221818289\n", - " values: 0.43671488061955577\n", - " values: 0.1275321424666046\n", - " values: 0.1799671254279993\n", - " values: 0.19225018334168398\n", - " values: 0.40356976730662475\n", - " values: 0.8969527865130271\n", - " values: 0.7388248572902041\n", - " values: 0.9898502183509792\n", - " values: 0.16470103098169941\n", - " values: 0.6826360504345346\n", - " values: 0.5465812398147825\n", - " values: 0.39791315735932065\n", - " values: 0.7498358475081949\n", - " values: 0.5817620127702237\n", - " values: 0.7287255383928085\n", - " values: 0.6300614133050859\n", - " values: 0.006759929350948823\n", - " values: 0.15133246929936894\n", - " values: 0.9190732054788565\n", - " values: 0.08014062539643796\n", - " values: 0.06387343575428595\n", - " values: 0.5546992533141987\n", - " values: 0.8980056804027844\n", - " values: 0.7717073917169363\n", - " values: 0.8251033502985946\n", - " values: 0.730265758035678\n", - " values: 0.5717744355320056\n", - " values: 0.8392623616665479\n", - " values: 0.6620350365504034\n", - " values: 0.40160037761891876\n", - " values: 0.9194226912589473\n", - " values: 0.6265223660845535\n", - " values: 0.21339439778547298\n", - " values: 0.38323816108099706\n", - " values: 0.9185894978629671\n", - " values: 0.5096514584839736\n", - " values: 0.6812162836171367\n", - " values: 0.6997217567364962\n", - " values: 0.47963809434789595\n", - " values: 0.93788528127083\n", - " values: 0.07966937955838804\n", - " values: 0.6758292450211021\n", - " 
values: 0.6995864890439781\n", - " values: 0.4250193451588421\n", - " values: 0.26385131302964326\n", - " values: 0.6432186176346865\n", - " values: 0.06834322210336097\n", - " values: 0.45200576122218505\n", - " values: 0.30544317685991806\n", - " values: 0.954184950859937\n", - " values: 0.3168049854994762\n", - " values: 0.04382535151929756\n", - " values: 0.7077794721469689\n", - " values: 0.915641212298199\n", - " values: 0.5399896747002474\n", - " values: 0.9071322766076261\n", - " values: 0.809480247635963\n", - " values: 0.6183767306823053\n", - " values: 0.4989792831935491\n", - " values: 0.5566420560857704\n", - " values: 0.28131465934491917\n", - " values: 0.6198393334203187\n", - " values: 0.2847876808231876\n", - " values: 0.39879754971094694\n", - " values: 0.7641507843049505\n", - " values: 0.7087838197761839\n", - " values: 0.7686490658733188\n", - " values: 0.44335718244127187\n", - " values: 0.5257609078029385\n", - " values: 0.38864304179006237\n", - " values: 0.784315275795489\n", - " values: 0.3025741615571287\n", - " values: 0.44984412120157846\n", - " values: 0.9753730738951223\n", - " values: 0.46042333036929983\n", - " values: 0.2785453696175244\n", - " values: 0.3557441211522473\n", - " values: 0.2065261822170904\n", - " values: 0.8656969913703902\n", - " values: 0.7830888985890412\n", - " values: 0.37254760294416644\n", - " values: 0.1274745443411992\n", - " values: 0.4755592622465281\n", - " values: 0.9380180962632326\n", - " values: 0.06627025923210328\n", - " values: 0.5416359526825841\n", - " values: 0.3847358769796979\n", - " values: 0.788929157967847\n", - " values: 0.452029024590188\n", - " values: 0.6038399296322474\n", - " values: 0.7578896956590857\n", - " values: 0.8159407678380852\n", - " values: 0.1617806049274504\n", - " values: 0.08496503389929166\n", - " values: 0.7761886063523513\n", - " values: 0.8287649641354454\n", - " values: 0.05201120030071571\n", - " values: 0.9871745423563195\n", - " values: 
0.07429616700162434\n", - " values: 0.9457548972686184\n", - " values: 0.7129751189790626\n", - " values: 0.17167836208578513\n", - " values: 0.20736184175081662\n", - " values: 0.47226701706139784\n", - " values: 0.03714312930828212\n", - " values: 0.019620223873771048\n", - " values: 0.12352759107164102\n", - " values: 0.11745206287623144\n", - " values: 0.6141019739337564\n", - " values: 0.6724586465974781\n", - " values: 0.49047391754704384\n", - " values: 0.711111000388292\n", - " values: 0.19964347859340625\n", - " values: 0.16585905798679745\n", - " values: 0.7491394446205853\n", - " values: 0.15662528451359004\n", - " values: 0.74553925316068\n", - " values: 0.9921800659735481\n", - " values: 0.37798204533805835\n", - " values: 0.020023594430017333\n", - " values: 0.1760922605050893\n", - " values: 0.8649293669377118\n", - " values: 0.7302487848489226\n", - " values: 0.5198664820791008\n", - " values: 0.19248209716151943\n", - " values: 0.5526354408981655\n", - " values: 0.16851977202084067\n", - " values: 0.6853383811610501\n", - " values: 0.7883012648335126\n", - " values: 0.4090391314164493\n", - " values: 0.5099027093103421\n", - " values: 0.8792288317360334\n", - " values: 0.9083022442000636\n", - " values: 0.3953592255270043\n", - " values: 0.01976293322262812\n", - " values: 0.1744975280305271\n", - " values: 0.5940164994944154\n", - " values: 0.0071678139933238905\n", - " values: 0.5912404863819163\n", - " values: 0.3323256433868679\n", - " values: 0.7006997605780735\n", - " values: 0.967070265181319\n", - " values: 0.20201948750739673\n", - " values: 0.3188616803044195\n", - " values: 0.0019118753682740852\n", - " values: 0.3832876526620892\n", - " values: 0.6640055176376981\n", - " values: 0.2519498140416925\n", - " values: 0.576058482421403\n", - " values: 0.6037176923917787\n", - " values: 0.36680621476203223\n", - " values: 0.48300179956796063\n", - " values: 0.44422420895957093\n", - " values: 0.7723379128268046\n", - " values: 
0.24494011281298544\n", - " values: 0.21985024161457256\n", - " values: 0.6002822764352966\n", - " values: 0.7979418446444293\n", - " values: 0.14225506908999608\n", - " values: 0.22708068055376207\n", - " values: 0.39872760984431965\n", - " values: 0.6931143559812282\n", - " values: 0.34982758119976665\n", - " values: 0.9813447173230605\n", - " values: 0.8846214378581\n", - " values: 0.1727314402406538\n", - " values: 0.6461148758751788\n", - " values: 0.26156684816486553\n", - " values: 0.9821875485658176\n", - " values: 0.4801414752947186\n", - " values: 0.43660682661084393\n", - " values: 0.9533840238439006\n", - " values: 0.6844987820597782\n", - " values: 0.13065218133040113\n", - " values: 0.12397102261191695\n", - " values: 0.5913593358389608\n", - " values: 0.422196903055467\n", - " values: 0.9899904832774842\n", - " values: 0.5015699030050338\n", - " values: 0.5635105154762816\n", - " values: 0.09007555144042134\n", - " values: 0.45811623522030454\n", - " values: 0.23709441812252185\n", - " values: 0.4536158131381315\n", - " values: 0.1678584897082751\n", - " values: 0.14302774210320357\n", - " values: 0.40271091105845247\n", - " values: 0.3552945845567954\n", - " values: 0.9674679192088959\n", - " values: 0.05362901797318054\n", - " values: 0.24761227866717794\n", - " values: 0.11473342851728396\n", - " values: 0.11455690634212967\n", - " values: 0.6030008249121739\n", - " values: 0.6350099835235462\n", - " values: 0.4014656806091461\n", - " values: 0.3301113690322718\n", - " values: 0.4423031052383737\n", - " values: 0.34067560207261816\n", - " values: 0.5190962714102658\n", - " values: 0.18400892018391657\n", - " values: 0.04613793028959923\n", - " values: 0.08184859411457046\n", - " values: 0.14220816502134126\n", - " values: 0.5888868915840526\n", - " values: 0.7282728628792509\n", - " values: 0.7997321499744532\n", - " values: 0.6348531824349741\n", - " values: 0.937039428120765\n", - " values: 0.7379013101674876\n", - " values: 
0.6279974097182232\n", - " values: 0.6771120837077502\n", - " values: 0.9680336277564523\n", - " values: 0.4323034732184443\n", - " values: 0.38256890191644943\n", - " values: 0.3949529991175674\n", - " values: 0.22323515486310652\n", - " values: 0.8854257065067576\n", - " values: 0.5442978118638369\n", - " values: 0.61698365332973\n", - " values: 0.5646449672787983\n", - " values: 0.7351595577725178\n", - " values: 0.8317027094363203\n", - " values: 0.1652108534709511\n", - " values: 0.44276746195670413\n", - " values: 0.0480511080608671\n", - " values: 0.7847332552735922\n", - " values: 0.2861667209296781\n", - " values: 0.43396462235585664\n", - " values: 0.8740517166590877\n", - " values: 0.47363629735283863\n", - " values: 0.297804662695365\n", - " values: 0.26206164876411364\n", - " values: 0.2221114169935421\n", - " values: 0.9904257335908437\n", - " values: 0.973512309966912\n", - " values: 0.872741194962346\n", - " values: 0.6162183326310449\n", - " values: 0.4717639237342467\n", - " values: 0.5946959447967756\n", - " values: 0.08364253300241775\n", - " values: 0.8228728468374276\n", - " values: 0.03187493578351741\n", - " values: 0.11208172240344061\n", - " values: 0.7911110665711703\n", - " values: 0.16894333245021365\n", - " values: 0.8935865113997377\n", - " values: 0.46194856941270057\n", - " values: 0.28900045554887455\n", - " values: 0.36804241612003197\n", - " values: 0.7157022046996451\n", - " values: 0.2662750204946921\n", - " values: 0.34708304252973576\n", - " values: 0.379236435188267\n", - " values: 0.26839638586624026\n", - " values: 0.8830382071112963\n", - " values: 0.040926058120172915\n", - " values: 0.9308131814794145\n", - " values: 0.750290103726899\n", - " values: 0.6489568715658779\n", - " values: 0.9338467384242554\n", - " values: 0.08709331757722116\n", - " values: 0.8249459529019156\n", - " values: 0.9069844171536067\n", - " values: 0.7373479465685048\n", - " values: 0.6477417510689855\n", - " values: 0.22128369908778323\n", - " 
values: 0.27290140180043143\n", - " values: 0.5894189593816577\n", - " values: 0.8139230696156499\n", - " values: 0.6983637639498655\n", - " values: 0.5716461612867579\n", - " values: 0.16309948866581503\n", - " values: 0.1722235481663491\n", - " values: 0.5585793241817713\n", - " values: 0.07007029349722582\n", - " values: 0.01832110338025872\n", - " values: 0.9823413546435054\n", - " values: 0.8648167716550387\n", - " values: 0.5414706636843953\n", - " values: 0.48115823082855935\n", - " values: 0.1160632929189871\n", - " values: 0.6660648966746223\n", - " values: 0.8233989374854808\n", - " values: 0.5172392784197191\n", - " values: 0.41165419747562726\n", - " values: 0.2368693846007296\n", - " values: 0.49981148544390364\n", - " values: 0.37352432957799386\n", - " values: 0.44483391555614527\n", - " values: 0.6038501819758755\n", - " values: 0.822243312724317\n", - " values: 0.7594190113305364\n", - " values: 0.10325211183093186\n", - " values: 0.6832226555532109\n", - " values: 0.011547990803500219\n", - " values: 0.926633650776553\n", - " values: 0.9607695045321044\n", - " values: 0.1888093670878398\n", - " values: 0.31004902768973197\n", - " values: 0.1968644539610982\n", - " values: 0.1116220570139822\n", - " values: 0.8237000276548585\n", - " values: 0.5576322247118044\n", - " values: 0.5451797579599671\n", - " values: 0.07397666448495577\n", - " values: 0.699068832427997\n", - " values: 0.34037110088694666\n", - " values: 0.8355346262323432\n", - " values: 0.8678709461970506\n", - " values: 0.47716139260833057\n", - " values: 0.5953851115455981\n", - " values: 0.5906272455573931\n", - " values: 0.9566657137247552\n", - " values: 0.040178333174049286\n", - " values: 0.855174510011565\n", - " values: 0.8525017922650974\n", - " values: 0.0676054200933901\n", - " values: 0.010647036269682664\n", - " values: 0.9901662227237883\n", - " values: 0.3829794125726609\n", - " values: 0.5786148018506755\n", - " values: 0.3279516146214393\n", - " values: 
0.6859118143302187\n", - " values: 0.16794812254574942\n", - " values: 0.22458031857158167\n", - " values: 0.08367973137918772\n", - " values: 0.7932588553539374\n", - " values: 0.4520929865066634\n", - " values: 0.19603450144115775\n", - " values: 0.8539684220265759\n", - " values: 0.44271583187617447\n", - " values: 0.26449532190247405\n", - " values: 0.43714482854083336\n", - " values: 0.5183945797513123\n", - " values: 0.9662430578889708\n", - " values: 0.4885251110450807\n", - " values: 0.3421088216902748\n", - " values: 0.22084195962886788\n", - " values: 0.44917701657597753\n", - " values: 0.6627206186910988\n", - " values: 0.07371198641675092\n", - " values: 0.29682810148406824\n", - " values: 0.40184749596752156\n", - " values: 0.7016311261752014\n", - " values: 0.20667062986538132\n", - " values: 0.791517683110984\n", - " values: 0.9636649741803575\n", - " values: 0.5014969709134501\n", - " values: 0.7268500561755789\n", - " values: 0.7440510177008851\n", - " values: 0.00678820738293151\n", - " values: 0.6005865501540042\n", - " values: 0.6978737371652832\n", - " values: 0.272534333192206\n", - " values: 0.6188358819258538\n", - " values: 0.42740299705422535\n", - " values: 0.22385404166424372\n", - " values: 0.5578201727389978\n", - " values: 0.7127465217809483\n", - " values: 0.4246256541577902\n", - " values: 0.36178839461702705\n", - " values: 0.9514119709130309\n", - " values: 0.12683392567556084\n", - " values: 0.12900705136108126\n", - " values: 0.9595098102827797\n", - " values: 0.8229392974283574\n", - " values: 0.9618527426136988\n", - " values: 0.07498182998390135\n", - " values: 0.8722028904669875\n", - " values: 0.1788560444756805\n", - " values: 0.4360212251825827\n", - " values: 0.3719624804939067\n", - " values: 0.32744371512896\n", - " values: 0.6323399169323207\n", - " values: 0.7901947043851578\n", - " values: 0.9069744232524791\n", - " values: 0.539297251209952\n", - " values: 0.9685459598996273\n", - " values: 0.6387646766404155\n", - 
" values: 0.7057360097043298\n", - " values: 0.41464053398627587\n", - " values: 0.5909737362549322\n", - " values: 0.4231307540738921\n", - " values: 0.8892214616835967\n", - " values: 0.16542092493286398\n", - " values: 0.12436665530108881\n", - " values: 0.21269549412403754\n", - " values: 0.16750085284470706\n", - " values: 0.2685364164843447\n", - " values: 0.49111620940908385\n", - " values: 0.038778167269567065\n", - " values: 0.3531032452281505\n", - " values: 0.04817171907005713\n", - " values: 0.6329462587108713\n", - " values: 0.09247724729111084\n", - " values: 0.6404776150622576\n", - " values: 0.7107595349316268\n", - " values: 0.9543706054738256\n", - " values: 0.2991778606367096\n", - " values: 0.6801008553705754\n", - " values: 0.23735727923172545\n", - " values: 0.2984507751655058\n", - " values: 0.4846526063688782\n", - " values: 0.7124608669673974\n", - " values: 0.16226549607943863\n", - " values: 0.49822984364082445\n", - " values: 0.2542702429530228\n", - " values: 0.8450525411266374\n", - " values: 0.9592828945164622\n", - " values: 0.7781607636185804\n", - " values: 0.6795508221033534\n", - " values: 0.31426524415972834\n", - " values: 0.26718782842433453\n", - " values: 0.2876891221104757\n", - " values: 0.9645072378578025\n", - " values: 0.772275901242856\n", - " values: 0.731436551616058\n", - " values: 0.6686478374806571\n", - " values: 0.32149100581639456\n", - " values: 0.49452545422153305\n", - " values: 0.24362732793678954\n", - " values: 0.06707004493306379\n", - " values: 0.18723674360907416\n", - " values: 0.06008848246749732\n", - " values: 0.28924702207641106\n", - " values: 0.926531716272654\n", - " values: 0.31226492240184034\n", - " values: 0.06781473749895461\n", - " values: 0.4710978034221338\n", - " values: 0.06281968696643625\n", - " values: 0.28523659595804396\n", - " values: 0.7122239843369272\n", - " values: 0.9067449772727121\n", - " values: 0.08590125743812504\n", - " values: 0.07170873779596532\n", - " values: 
0.5121937829565428\n", - " values: 0.5872841046485451\n", - " values: 0.6745937585498459\n", - " values: 0.6347679752558522\n", - " values: 0.8300893851806067\n", - " values: 0.40373849636177006\n", - " values: 0.5056810812088577\n", - " values: 0.9318252869250693\n", - " values: 0.09120234547446626\n", - " values: 0.9515281674484714\n", - " values: 0.8959005725452157\n", - " values: 0.959471015249324\n", - " values: 0.7417710004804116\n", - " values: 0.3637148906031854\n", - " values: 0.6668962623402832\n", - " values: 0.407069673755715\n", - " values: 0.12727249015165087\n", - " values: 0.44503512810766077\n", - " values: 0.5795923043023258\n", - " values: 0.7728760488461031\n", - " values: 0.0640742724509058\n", - " values: 0.921431248028783\n", - " values: 0.032772832408754615\n", - " values: 0.8780195121038374\n", - " values: 0.8652066209109612\n", - " values: 0.3300566521367848\n", - " values: 0.3567832572838985\n", - " values: 0.49278101922172934\n", - " values: 0.3975564083343852\n", - " values: 0.4145829495665915\n", - " values: 0.28120679854237973\n", - " values: 0.2387923478106284\n", - " values: 0.893505882736082\n", - " values: 0.3606615746807059\n", - " values: 0.9618500294957606\n", - " values: 0.2340210886532127\n", - " values: 0.5447126871511164\n", - " values: 0.5889447944390054\n", - " values: 0.819745685184733\n", - " values: 0.02548883415261649\n", - " values: 0.363754842666297\n", - " values: 0.3370985337010173\n", - " values: 0.10525235238557928\n", - " values: 0.21094628212443012\n", - " values: 0.24746092911149764\n", - " values: 0.12198481546936768\n", - " values: 0.09471286541829926\n", - " values: 0.7719980765267516\n", - " values: 0.05612253963845715\n", - " values: 0.0941304246811967\n", - " values: 0.29361945571976766\n", - " values: 0.24041279887794775\n", - " values: 0.717275822374148\n", - " values: 0.4775844795908343\n", - " values: 0.7929067287637038\n", - " values: 0.5657212144562104\n", - " values: 0.4402157142307336\n", - " 
values: 0.708162036149398\n", - " values: 0.9835979580371608\n", - " values: 0.05341360097141845\n", - " values: 0.8822283459994484\n", - " values: 0.780886494239581\n", - " values: 0.9062254482522679\n", - " values: 0.002937430620338466\n", - " values: 0.9308995279182417\n", - " values: 0.22217759527834147\n", - " values: 0.9340713989063913\n", - " values: 0.25626424117423574\n", - " values: 0.9738045485471475\n", - " values: 0.7393928367376609\n", - " values: 0.9410008642817723\n", - " values: 0.4616147658598284\n", - " values: 0.5274360135298201\n", - " values: 0.5706031077908477\n", - " values: 0.605960965886261\n", - " values: 0.06865776719291505\n", - " values: 0.7463399546947039\n", - " values: 0.9465718949309232\n", - " values: 0.16276087748742318\n", - " values: 0.8759624897013112\n", - " values: 0.8406560511666052\n", - " values: 0.20030208981003628\n", - " values: 0.971229946572198\n", - " values: 0.8361440720624936\n", - " values: 0.956590113668845\n", - " values: 0.026867803430079773\n", - " values: 0.877415236641461\n", - " values: 0.41353768698659255\n", - " values: 0.9755756081115282\n", - " values: 0.9581062338361362\n", - " values: 0.7286753909489332\n", - " values: 0.1366174849853039\n", - " values: 0.33159751637360513\n", - " values: 0.269629059151394\n", - " values: 0.5959313702623316\n", - " values: 0.2487390189530485\n", - " values: 0.8511731265658438\n", - " values: 0.5510426911265237\n", - " values: 0.7119347917180158\n", - " values: 0.41831969045048634\n", - " values: 0.25672211233008324\n", - " values: 0.3351115524883752\n", - " values: 0.5473728133293863\n", - " values: 0.8698702730922934\n", - " values: 0.4183321620814887\n", - " values: 0.5808441958265111\n", - " values: 0.5899409466557799\n", - " values: 0.7437713325809862\n", - " values: 0.11660177234934732\n", - " values: 0.25671162938525816\n", - " values: 0.9822719625872597\n", - " values: 0.2694914977887034\n", - " values: 0.7958360217722709\n", - " values: 0.3170353357514133\n", 
- " values: 0.3137220778781522\n", - " values: 0.8398038765668162\n", - " values: 0.3175929754177462\n", - " values: 0.45981590545966644\n", - " values: 0.5710696822079859\n", - " values: 0.6013346665501819\n", - " values: 0.15384371146908704\n", - " values: 0.8140416733389426\n", - " values: 0.1414088970101569\n", - " values: 0.17869468884182194\n", - " values: 0.9187388331605334\n", - " values: 0.7324791026349019\n", - " values: 0.7655298305360002\n", - " values: 0.7059652847757655\n", - " values: 0.08601685746283005\n", - " values: 0.5966730781846389\n", - " values: 0.13431141424638915\n", - " values: 0.8530889075674768\n", - " values: 0.8598000683570904\n", - " values: 0.2842885523840374\n", - " values: 0.02750575760593954\n", - " values: 0.1797823815064442\n", - " values: 0.12714869089939185\n", - " values: 0.4975971251465324\n", - " values: 0.3221016176236038\n", - " values: 0.45512114528222825\n", - " values: 0.4157613212615767\n", - " values: 0.7246528729756676\n", - " values: 0.19512065773227205\n", - " values: 0.8790379106412447\n", - " values: 0.8082845359180735\n", - " values: 0.5609260398104546\n", - " values: 0.7053267795231765\n", - " values: 0.34835983964628525\n", - " values: 0.45300891740415394\n", - " values: 0.12995598936734165\n", - " values: 0.3168660217597954\n", - " values: 0.9315366401612346\n", - " values: 0.05601865178739818\n", - " values: 0.8856050356252713\n", - " values: 0.49563050222288596\n", - " values: 0.5749391280011129\n", - " values: 0.32639189730027984\n", - " values: 0.05098419319526892\n", - " values: 0.3701586394920481\n", - " values: 0.6478621909766373\n", - " values: 0.5283307951839492\n", - " values: 0.22595941299244948\n", - " values: 0.39109891787338846\n", - " values: 0.09922432902413403\n", - " values: 0.14668497452577622\n", - " values: 0.4962743472271398\n", - " values: 0.8836883218124524\n", - " values: 0.2046467014692518\n", - " values: 0.2632654965572435\n", - " values: 0.5390020292637117\n", - " }\n", - "}\n", 
- "\n", - "Response:\n", - "meta {\n", - " puid: \"kioo19njo8rftrjnecngfdau4t\"\n", - " requestPath {\n", - " key: \"mnist-model\"\n", - " value: \"seldonio/tfserving-proxy_rest:0.7\"\n", - " }\n", - "}\n", - "data {\n", - " names: \"t:0\"\n", - " names: \"t:1\"\n", - " names: \"t:2\"\n", - " names: \"t:3\"\n", - " names: \"t:4\"\n", - " names: \"t:5\"\n", - " names: \"t:6\"\n", - " names: \"t:7\"\n", - " names: \"t:8\"\n", - " names: \"t:9\"\n", - " tensor {\n", - " shape: 1\n", - " shape: 10\n", - " values: 2.35872439e-19\n", - " values: 2.979662e-33\n", - " values: 0.984709799\n", - " values: 0.0124261566\n", - " values: 5.1570811e-26\n", - " values: 0.00286374264\n", - " values: 2.4608495e-18\n", - " values: 3.90025435e-14\n", - " values: 2.28899552e-07\n", - " values: 4.03268579e-20\n", - " }\n", - "}\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "r = sc.predict(gateway=\"ambassador\",transport=\"rest\",shape=(1,784))\n", "print(r)" @@ -1285,23 +234,75 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io \"tfserving\" deleted\r\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl delete -f ../servers/tfserving/samples/mnist_rest.yaml" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Serve Tensorflow MNIST Model with Tensorflow protocol\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "!pygmentize ../servers/tfserving/samples/halfplustwo_rest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "!kubectl apply -f ../servers/tfserving/samples/halfplustwo_rest.yaml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + 
"outputs": [], + "source": [ + "!kubectl rollout status deploy/hpt-default-a79d958" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!curl -d '{\"instances\": [1.0, 2.0, 5.0]}' \\\n", + " -X POST http://localhost:8003/seldon/seldon/hpt/v1/models/halfplustwo/:predict \\\n", + " -H \"Content-Type: application/json\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!kubectl delete -f ../servers/tfserving/samples/halfplustwo_rest.yaml" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1311,78 +312,40 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[94mapiVersion\u001b[39;49;00m: machinelearning.seldon.io/v1alpha2\r\n", - "\u001b[94mkind\u001b[39;49;00m: SeldonDeployment\r\n", - "\u001b[94mmetadata\u001b[39;49;00m:\r\n", - " \u001b[94mname\u001b[39;49;00m: mlflow\r\n", - "\u001b[94mspec\u001b[39;49;00m:\r\n", - " \u001b[94mname\u001b[39;49;00m: wines\r\n", - " \u001b[94mpredictors\u001b[39;49;00m:\r\n", - " - \u001b[94mgraph\u001b[39;49;00m:\r\n", - " \u001b[94mchildren\u001b[39;49;00m: []\r\n", - " \u001b[94mimplementation\u001b[39;49;00m: MLFLOW_SERVER\r\n", - " \u001b[94mmodelUri\u001b[39;49;00m: gs://seldon-models/mlflow/elasticnet_wine\r\n", - " \u001b[94mname\u001b[39;49;00m: classifier\r\n", - " \u001b[94mname\u001b[39;49;00m: default\r\n", - " \u001b[94mreplicas\u001b[39;49;00m: 1\r\n" - ] - } - ], + "outputs": [], "source": [ "!pygmentize ../servers/mlflowserver/samples/elasticnet_wine.yaml" ] }, { "cell_type": "code", - "execution_count": 32, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io/mlflow created\r\n" - ] - } - ], + "outputs": 
[], "source": [ "!kubectl apply -f ../servers/mlflowserver/samples/elasticnet_wine.yaml" ] }, { "cell_type": "code", - "execution_count": 33, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Waiting for deployment \"wines-default-8c791aa\" rollout to finish: 0 of 1 updated replicas are available...\n", - "deployment \"wines-default-8c791aa\" successfully rolled out\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl rollout status deploy/wines-default-8c791aa" ] }, { "cell_type": "code", - "execution_count": 34, + "execution_count": null, "metadata": { "scrolled": false }, @@ -1394,53 +357,11 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Success:True message:\n", - "Request:\n", - "data {\n", - " tensor {\n", - " shape: 1\n", - " shape: 11\n", - " values: 0.6907478776933986\n", - " values: 0.8309510656778891\n", - " values: 0.562660723739024\n", - " values: 0.016727865284311028\n", - " values: 0.2935636350018127\n", - " values: 0.6049700606468156\n", - " values: 0.26904374522513186\n", - " values: 0.059964759754932206\n", - " values: 0.11900130537727505\n", - " values: 0.30425941706369986\n", - " values: 0.42276301953693896\n", - " }\n", - "}\n", - "\n", - "Response:\n", - "meta {\n", - " puid: \"kh2v4blpf1fh8c91o7jl4ucocr\"\n", - " requestPath {\n", - " key: \"classifier\"\n", - " value: \"seldonio/mlflowserver_rest:0.2\"\n", - " }\n", - "}\n", - "data {\n", - " tensor {\n", - " shape: 1\n", - " values: 5.212350520219259\n", - " }\n", - "}\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "r = sc.predict(gateway=\"ambassador\",transport=\"rest\",shape=(1,11))\n", "print(r)" @@ -1448,19 +369,11 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": null, "metadata": { "scrolled": false }, - 
"outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seldondeployment.machinelearning.seldon.io \"mlflow\" deleted\r\n" - ] - } - ], + "outputs": [], "source": [ "!kubectl delete -f ../servers/mlflowserver/samples/elasticnet_wine.yaml" ] @@ -1483,7 +396,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.4" + "version": "3.6.8" }, "varInspector": { "cols": { diff --git a/operator/apis/machinelearning/v1/seldondeployment_types.go b/operator/apis/machinelearning/v1/seldondeployment_types.go index 16f85f457e..5698702651 100644 --- a/operator/apis/machinelearning/v1/seldondeployment_types.go +++ b/operator/apis/machinelearning/v1/seldondeployment_types.go @@ -46,6 +46,7 @@ const ( ANNOTATION_HEADLESS_SVC = "seldon.io/headless-svc" ANNOTATION_NO_ENGINE = "seldon.io/no-engine" ANNOTATION_CUSTOM_SVC_NAME = "seldon.io/svc-name" + ANNOTATION_EXECUTOR = "seldon.io/executor" ) func hash(text string) string { @@ -224,8 +225,24 @@ type PredictorSpec struct { Traffic int32 `json:"traffic,omitempty" protobuf:"bytes,9,opt,name=traffic"` Explainer Explainer `json:"explainer,omitempty" protobuf:"bytes,10,opt,name=explainer"` Shadow bool `json:"shadow,omitempty" protobuf:"bytes,11,opt,name=shadow"` + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,12,opt,name=protocol"` + Transport Transport `json:"transport,omitempty" protobuf:"bytes,13,opt,name=transport"` } +type Protocol string + +const ( + ProtocolSeldon Protocol = "seldon" + ProtocolTensorflow Protocol = "tensorflow" +) + +type Transport string + +const ( + TransportRest Transport = "rest" + TransportGrpc Transport = "grpc" +) + type SvcOrchSpec struct { Resources *v1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` Env []*v1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,opt,name=env"` diff --git a/operator/apis/machinelearning/v1/seldondeployment_webhook.go 
b/operator/apis/machinelearning/v1/seldondeployment_webhook.go index 9b47cc4070..f4bb49ff72 100644 --- a/operator/apis/machinelearning/v1/seldondeployment_webhook.go +++ b/operator/apis/machinelearning/v1/seldondeployment_webhook.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/seldonio/seldon-core/operator/constants" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -181,7 +182,7 @@ func getUpdatePortNumMap(name string, nextPortNum *int32, portMap map[string]int func (r *SeldonDeploymentSpec) DefaultSeldonDeployment(namespace string) { - var firstPuPortNum int32 = 9000 + var firstPuPortNum int32 = constants.FirstPortNumber if env_preditive_unit_service_port, ok := os.LookupEnv("PREDICTIVE_UNIT_SERVICE_PORT"); ok { portNum, err := strconv.Atoi(env_preditive_unit_service_port) if err != nil { @@ -296,7 +297,11 @@ func (r *SeldonDeploymentSpec) DefaultSeldonDeployment(namespace string) { // pu needs to have an endpoint as engine reads it from SDep in order to direct graph traffic // probes etc will be added later by controller if pu.Endpoint == nil { - pu.Endpoint = &Endpoint{Type: REST} + if p.Transport == TransportGrpc { + pu.Endpoint = &Endpoint{Type: GRPC} + } else { + pu.Endpoint = &Endpoint{Type: REST} + } } var portType string if pu.Endpoint.Type == GRPC { @@ -406,6 +411,12 @@ func checkPredictiveUnits(pu *PredictiveUnit, p *PredictorSpec, fldPath *field.P } } + if pu.Logger != nil { + if pu.Logger.Mode == "" { + allErrs = append(allErrs, field.Invalid(fldPath, pu.Logger.Mode, "No logger mode specified")) + } + } + for i := 0; i < len(pu.Children); i++ { allErrs = checkPredictiveUnits(&pu.Children[i], p, fldPath.Index(i), allErrs) } @@ -460,6 +471,16 @@ func (r *SeldonDeploymentSpec) ValidateSeldonDeployment() error { } predictorNames[p.Name] = true allErrs = checkPredictiveUnits(p.Graph, &p, field.NewPath("spec").Child("predictors").Index(i).Child("graph"), 
allErrs) + + if p.Protocol != "" && !(p.Protocol == ProtocolSeldon || p.Protocol == ProtocolTensorflow) { + fldPath := field.NewPath("spec").Child("predictors").Index(i) + allErrs = append(allErrs, field.Invalid(fldPath, p.Protocol, "Invalid protocol")) + } + + if p.Transport != "" && !(p.Transport == TransportRest || p.Transport == TransportGrpc) { + fldPath := field.NewPath("spec").Child("predictors").Index(i) + allErrs = append(allErrs, field.Invalid(fldPath, p.Transport, "Invalid transport")) + } } allErrs = checkTraffic(r, field.NewPath("spec"), allErrs) diff --git a/operator/config/crd/bases/machinelearning.seldon.io_seldondeployments.yaml b/operator/config/crd/bases/machinelearning.seldon.io_seldondeployments.yaml index 540340bf39..94dd4eaf66 100644 --- a/operator/config/crd/bases/machinelearning.seldon.io_seldondeployments.yaml +++ b/operator/config/crd/bases/machinelearning.seldon.io_seldondeployments.yaml @@ -5898,6 +5898,8 @@ spec: type: object name: type: string + protocol: + type: string replicas: format: int32 type: integer @@ -6036,6 +6038,8 @@ spec: traffic: format: int32 type: integer + transport: + type: string required: - graph - name diff --git a/operator/config/crd/kustomization.yaml b/operator/config/crd/kustomization.yaml index 32c651d3d4..adba30fd95 100644 --- a/operator/config/crd/kustomization.yaml +++ b/operator/config/crd/kustomization.yaml @@ -31,6 +31,7 @@ patchesStrategicMerge: # Warning # DownwardAPI volumes are not presently supported due to: https://github.com/SeldonIO/seldon-core/issues/926 +# Also see: https://github.com/kubernetes-sigs/kubebuilder/issues/1128 # Remove volume patch below for Kubernetes 1.17, 1.18 when tested patchesJson6902: diff --git a/operator/config/manager/manager.yaml b/operator/config/manager/manager.yaml index c1f19bb835..3c4f883fdb 100644 --- a/operator/config/manager/manager.yaml +++ b/operator/config/manager/manager.yaml @@ -57,13 +57,29 @@ spec: - name: ENGINE_SERVER_PORT value: "8000" - name: 
ENGINE_PROMETHEUS_PATH - value: prometheus + value: "/prometheus" - name: ISTIO_ENABLED value: "false" - name: ISTIO_GATEWAY value: seldon-gateway - name: ISTIO_TLS_MODE value: "" + - name: USE_EXECUTOR + value: "true" + - name: EXECUTOR_CONTAINER_IMAGE_AND_VERSION + value: seldonio/seldon-core-executor:1.0.1-SNAPSHOT + - name: EXECUTOR_CONTAINER_IMAGE_PULL_POLICY + value: IfNotPresent + - name: EXECUTOR_PROMETHEUS_PATH + value: "/prometheus" + - name: EXECUTOR_SERVER_GRPC_PORT + value: "5001" + - name: EXECUTOR_SERVER_PORT + value: "8000" + - name: EXECUTOR_CONTAINER_USER + value: "8888" + - name: EXECUTOR_CONTAINER_SERVICE_ACCOUNT_NAME + value: default image: controller:latest name: manager resources: diff --git a/operator/constants/constants.go b/operator/constants/constants.go index f45886f54f..a698b55e14 100644 --- a/operator/constants/constants.go +++ b/operator/constants/constants.go @@ -3,4 +3,17 @@ package constants const ( PU_PARAMETER_ENVVAR = "PREDICTIVE_UNIT_PARAMETERS" TFServingContainerName = "tfserving" + + GRPCRegExMatchAmbassador = "/(seldon.protos.*|tensorflow.serving.*)/.*" + GRPCRegExMatchIstio = ".*tensorflow.*|.*seldon.protos.*" + + PrePackedServerTensorflow = "TENSORFLOW_SERVER" + PrePackedServerSklearn = "SKLEARN_SERVER" + + TfServingGrpcPort = 2000 + TfServingRestPort = 2001 + TfServingArgPort = "--port=" + TfServingArgRestPort = "--rest_api_port=" + + FirstPortNumber = 9000 ) diff --git a/operator/controllers/ambassador.go b/operator/controllers/ambassador.go index 21197a1011..6823a5ac56 100644 --- a/operator/controllers/ambassador.go +++ b/operator/controllers/ambassador.go @@ -1,6 +1,7 @@ package controllers import ( + "github.com/seldonio/seldon-core/operator/constants" "strconv" "strings" @@ -30,7 +31,8 @@ type AmbassadorConfig struct { Name string `yaml:"name"` Grpc *bool `yaml:"grpc,omitempty"` Prefix string `yaml:"prefix"` - Rewrite string `yaml:"rewrite,omitempty"` + PrefixRegex *bool `yaml:"prefix_regex,omitempty"` + Rewrite 
string `yaml:"rewrite"` Service string `yaml:"service"` TimeoutMs int `yaml:"timeout_ms"` IdleTimeoutMs *int `yaml:"idle_timeout_ms,omitempty"` @@ -80,6 +82,7 @@ func getAmbassadorRestConfig(mlDep *machinelearningv1.SeldonDeployment, Kind: "Mapping", Name: "seldon_" + mlDep.ObjectMeta.Name + "_" + name + "_rest_mapping", Prefix: "/seldon/" + serviceNameExternal + "/", + Rewrite: "/", Service: serviceName + "." + namespace + ":" + strconv.Itoa(engine_http_port), TimeoutMs: timeout, RetryPolicy: &AmbassadorRetryPolicy{ @@ -164,15 +167,16 @@ func getAmbassadorGrpcConfig(mlDep *machinelearningv1.SeldonDeployment, } c := AmbassadorConfig{ - ApiVersion: "ambassador/v1", - Kind: "Mapping", - Name: "seldon_" + mlDep.ObjectMeta.Name + "_" + name + "_grpc_mapping", - Grpc: &grpc, - Prefix: "/seldon.protos.Seldon/", - Rewrite: "/seldon.protos.Seldon/", - Headers: map[string]string{"seldon": serviceNameExternal}, - Service: serviceName + "." + namespace + ":" + strconv.Itoa(engine_grpc_port), - TimeoutMs: timeout, + ApiVersion: "ambassador/v1", + Kind: "Mapping", + Name: "seldon_" + mlDep.ObjectMeta.Name + "_" + name + "_grpc_mapping", + Grpc: &grpc, + Prefix: constants.GRPCRegExMatchAmbassador, + PrefixRegex: &grpc, + Rewrite: "", + Headers: map[string]string{"seldon": serviceNameExternal}, + Service: serviceName + "." 
+ namespace + ":" + strconv.Itoa(engine_grpc_port), + TimeoutMs: timeout, RetryPolicy: &AmbassadorRetryPolicy{ RetryOn: "connect-failure", NumRetries: 3, diff --git a/operator/controllers/seldondeployment_controller.go b/operator/controllers/seldondeployment_controller.go index a318ee97aa..a851feae49 100644 --- a/operator/controllers/seldondeployment_controller.go +++ b/operator/controllers/seldondeployment_controller.go @@ -160,7 +160,7 @@ func createIstioResources(mlDep *machinelearningv1.SeldonDeployment, { Match: []istio.HTTPMatchRequest{ { - URI: &v1alpha1.StringMatch{Prefix: "/seldon.protos.Seldon/"}, + URI: &v1alpha1.StringMatch{Regex: constants.GRPCRegExMatchIstio}, Headers: map[string]v1alpha1.StringMatch{ "seldon": v1alpha1.StringMatch{Exact: mlDep.Name}, "namespace": v1alpha1.StringMatch{Exact: namespace}, @@ -602,6 +602,9 @@ func createContainerService(deploy *appsv1.Deployment, p machinelearningv1.Predi } namespace := getNamespace(mlDep) portType := "http" + if pu.Endpoint.Type == machinelearningv1.GRPC { + portType = "grpc" + } var portNum int32 portNum = 0 existingPort := machinelearningv1.GetPort(portType, con.Ports) @@ -609,10 +612,6 @@ func createContainerService(deploy *appsv1.Deployment, p machinelearningv1.Predi portNum = existingPort.ContainerPort } - if pu.Endpoint.Type == machinelearningv1.GRPC { - portType = "grpc" - } - // pu should have a port set by seldondeployment_create_update_handler.go (if not by user) // that mutator modifies SeldonDeployment and fires before this controller if pu.Endpoint.ServicePort != 0 { diff --git a/operator/controllers/seldondeployment_engine.go b/operator/controllers/seldondeployment_engine.go index 035824d5b1..07f387e562 100644 --- a/operator/controllers/seldondeployment_engine.go +++ b/operator/controllers/seldondeployment_engine.go @@ -29,6 +29,16 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) +const ( + ENV_DEFAULT_EXECUTOR_SERVER_PORT = "EXECUTOR_SERVER_PORT" + ENV_DEFAULT_EXECUTOR_SERVER_GRPC_PORT 
= "EXECUTOR_SERVER_GRPC_PORT" + ENV_EXECUTOR_PROMETHEUS_PATH = "EXECUTOR_PROMETHEUS_PATH" + ENV_ENGINE_PROMETHEUS_PATH = "ENGINE_PROMETHEUS_PATH" + + DEFAULT_EXECUTOR_CONTAINER_PORT = 8000 + DEFAULT_EXECUTOR_GRPC_PORT = 5001 +) + var ( EngineContainerName = "seldon-container-engine" ) @@ -61,9 +71,7 @@ func addEngineToDeployment(mlDep *machinelearningv1.SeldonDeployment, p *machine } deploy.Spec.Template.Spec.Containers = append(deploy.Spec.Template.Spec.Containers, *engineContainer) - //deploy.Spec.Template.Spec.ServiceAccountName = GetEnv("ENGINE_CONTAINER_SERVICE_ACCOUNT_NAME", "seldon") - //deploy.Spec.Template.Spec.DeprecatedServiceAccount = deploy.Spec.Template.Spec.ServiceAccountName - //deploy.Spec.Template.Annotations = map[string]string{} + if deploy.Spec.Template.Annotations == nil { deploy.Spec.Template.Annotations = make(map[string]string) } @@ -72,7 +80,7 @@ func addEngineToDeployment(mlDep *machinelearningv1.SeldonDeployment, p *machine deploy.Spec.Template.Annotations[ann] = p.Annotations[ann] } // Add prometheus annotations - deploy.Spec.Template.Annotations["prometheus.io/path"] = GetEnv("ENGINE_PROMETHEUS_PATH", "/prometheus") + deploy.Spec.Template.Annotations["prometheus.io/path"] = getPrometheusPath(mlDep) deploy.Spec.Template.Annotations["prometheus.io/port"] = strconv.Itoa(engine_http_port) deploy.Spec.Template.Annotations["prometheus.io/scrape"] = "true" @@ -98,42 +106,150 @@ func addEngineToDeployment(mlDep *machinelearningv1.SeldonDeployment, p *machine return nil } -// Create the Container for the service orchestrator. 
-func createEngineContainer(mlDep *machinelearningv1.SeldonDeployment, p *machinelearningv1.PredictorSpec, engine_http_port, engine_grpc_port int) (*corev1.Container, error) { - // Get engine user - var engineUser int64 = -1 - if engineUserEnv, ok := os.LookupEnv("ENGINE_CONTAINER_USER"); ok { - user, err := strconv.Atoi(engineUserEnv) +func getExecutorHttpPort() (engine_http_port int, err error) { + // Get engine http port from environment or use default + engine_http_port = DEFAULT_EXECUTOR_CONTAINER_PORT + var env_engine_http_port = GetEnv(ENV_DEFAULT_EXECUTOR_SERVER_PORT, "") + if env_engine_http_port != "" { + engine_http_port, err = strconv.Atoi(env_engine_http_port) if err != nil { - return nil, err - } else { - engineUser = int64(user) + return 0, err } } - // get predictor as base64 encoded json - pCopy := p.DeepCopy() - // Set traffic to zero to ensure this doesn't cause a diff in the resulting deployment created - pCopy.Traffic = 0 - predictorB64, err := getEngineVarJson(pCopy) - if err != nil { - return nil, err + return engine_http_port, nil +} + +func getExecutorGrpcPort() (engine_grpc_port int, err error) { + // Get engine grpc port from environment or use default + engine_grpc_port = DEFAULT_EXECUTOR_GRPC_PORT + var env_engine_grpc_port = GetEnv(ENV_DEFAULT_EXECUTOR_SERVER_GRPC_PORT, "") + if env_engine_grpc_port != "" { + engine_grpc_port, err = strconv.Atoi(env_engine_grpc_port) + if err != nil { + return 0, err + } } + return engine_grpc_port, nil +} - //get annotation for java opts or default - javaOpts := getAnnotation(mlDep, machinelearningv1.ANNOTATION_JAVA_OPTS, "-server -Dcom.sun.management.jmxremote.rmi.port=9090 -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=9090 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.local.only=false -Djava.rmi.server.hostname=127.0.0.1") +func isExecutorEnabled(mlDep *machinelearningv1.SeldonDeployment) bool { + 
useExecutor := getAnnotation(mlDep, machinelearningv1.ANNOTATION_EXECUTOR, "false") + useExecutorEnv := GetEnv("USE_EXECUTOR", "false") + return useExecutor == "true" || useExecutorEnv == "true" +} - //Engine resources - engineResources := p.SvcOrchSpec.Resources - if engineResources == nil { - cpuQuantity, _ := resource.ParseQuantity("0.1") - engineResources = &corev1.ResourceRequirements{ - Requests: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceCPU: cpuQuantity, - }, +func getPrometheusPath(mlDep *machinelearningv1.SeldonDeployment) string { + prometheusPath := "/prometheus" + if isExecutorEnabled(mlDep) { + prometheusPath = GetEnv(ENV_EXECUTOR_PROMETHEUS_PATH, prometheusPath) + } else { + prometheusPath = GetEnv(ENV_ENGINE_PROMETHEUS_PATH, prometheusPath) + } + return prometheusPath +} + +func getSvcOrchSvcAccountName(mlDep *machinelearningv1.SeldonDeployment) string { + svcAccount := "default" + if isExecutorEnabled(mlDep) { + if svcAccountTmp, ok := os.LookupEnv("EXECUTOR_CONTAINER_SERVICE_ACCOUNT_NAME"); ok { + svcAccount = svcAccountTmp + } + } else { + if svcAccountTmp, ok := os.LookupEnv("ENGINE_CONTAINER_SERVICE_ACCOUNT_NAME"); ok { + svcAccount = svcAccountTmp } } + return svcAccount +} + +func getSvcOrchUser(mlDep *machinelearningv1.SeldonDeployment) (int64, error) { + var engineUser int64 = -1 + if isExecutorEnabled(mlDep) { + if engineUserEnv, ok := os.LookupEnv("EXECUTOR_CONTAINER_USER"); ok { + user, err := strconv.Atoi(engineUserEnv) + if err != nil { + return -1, err + } else { + engineUser = int64(user) + } + } - c := corev1.Container{ + } else { + if engineUserEnv, ok := os.LookupEnv("ENGINE_CONTAINER_USER"); ok { + user, err := strconv.Atoi(engineUserEnv) + if err != nil { + return -1, err + } else { + engineUser = int64(user) + } + } + } + return engineUser, nil +} + +func createExecutorContainer(mlDep *machinelearningv1.SeldonDeployment, p *machinelearningv1.PredictorSpec, predictorB64 string, http_port int, grpc_port int, 
resources *corev1.ResourceRequirements) corev1.Container { + transport := p.Transport + //Backwards compatible with older resources + if transport == "" { + if p.Graph.Endpoint.Type == machinelearningv1.GRPC { + transport = machinelearningv1.TransportGrpc + } else { + transport = machinelearningv1.TransportRest + } + } + protocol := p.Protocol + //Backwards compatibility for older resources + if protocol == "" { + protocol = machinelearningv1.ProtocolSeldon + } + return corev1.Container{ + Name: EngineContainerName, + Image: GetEnv("EXECUTOR_CONTAINER_IMAGE_AND_VERSION", "seldonio/seldon-core-executor:1.0.1-SNAPSHOT"), + Args: []string{ + "--sdep", mlDep.Name, + "--namespace", mlDep.Namespace, + "--predictor", p.Name, + "--http_port", strconv.Itoa(http_port), + "--grpc_port", strconv.Itoa(grpc_port), + "--transport", string(transport), + "--protocol", string(protocol), + "--prometheus_path", getPrometheusPath(mlDep), + }, + ImagePullPolicy: corev1.PullPolicy(GetEnv("EXECUTOR_CONTAINER_IMAGE_PULL_POLICY", "IfNotPresent")), + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, + VolumeMounts: []corev1.VolumeMount{ + { + Name: machinelearningv1.PODINFO_VOLUME_NAME, + MountPath: machinelearningv1.PODINFO_VOLUME_PATH, + }, + }, + Env: []corev1.EnvVar{ + {Name: "ENGINE_PREDICTOR", Value: predictorB64}, + }, + Ports: []corev1.ContainerPort{ + {ContainerPort: int32(http_port), Protocol: corev1.ProtocolTCP}, + {ContainerPort: int32(grpc_port), Protocol: corev1.ProtocolTCP}, + }, + ReadinessProbe: &corev1.Probe{Handler: corev1.Handler{HTTPGet: &corev1.HTTPGetAction{Port: intstr.FromInt(http_port), Path: "/ready", Scheme: corev1.URISchemeHTTP}}, + InitialDelaySeconds: 20, + PeriodSeconds: 5, + FailureThreshold: 3, + SuccessThreshold: 1, + TimeoutSeconds: 60}, + LivenessProbe: &corev1.Probe{Handler: corev1.Handler{HTTPGet: &corev1.HTTPGetAction{Port: intstr.FromInt(http_port), Path: "/live", Scheme: 
corev1.URISchemeHTTP}}, + InitialDelaySeconds: 20, + PeriodSeconds: 5, + FailureThreshold: 3, + SuccessThreshold: 1, + TimeoutSeconds: 60}, + Resources: *resources, + } +} + +func createEngineContainerSpec(mlDep *machinelearningv1.SeldonDeployment, p *machinelearningv1.PredictorSpec, predictorB64 string, + engine_http_port int, engine_grpc_port int, engineResources *corev1.ResourceRequirements) corev1.Container { + return corev1.Container{ Name: EngineContainerName, Image: GetEnv("ENGINE_CONTAINER_IMAGE_AND_VERSION", "seldonio/engine:0.4.0"), ImagePullPolicy: corev1.PullPolicy(GetEnv("ENGINE_CONTAINER_IMAGE_PULL_POLICY", "IfNotPresent")), @@ -151,7 +267,7 @@ func createEngineContainer(mlDep *machinelearningv1.SeldonDeployment, p *machine {Name: "DEPLOYMENT_NAMESPACE", Value: mlDep.ObjectMeta.Namespace}, {Name: "ENGINE_SERVER_PORT", Value: strconv.Itoa(engine_http_port)}, {Name: "ENGINE_SERVER_GRPC_PORT", Value: strconv.Itoa(engine_grpc_port)}, - {Name: "JAVA_OPTS", Value: javaOpts}, + {Name: "JAVA_OPTS", Value: getAnnotation(mlDep, machinelearningv1.ANNOTATION_JAVA_OPTS, "-server -Dcom.sun.management.jmxremote.rmi.port=9090 -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=9090 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.local.only=false -Djava.rmi.server.hostname=127.0.0.1")}, }, Ports: []corev1.ContainerPort{ {ContainerPort: int32(engine_http_port), Protocol: corev1.ProtocolTCP}, @@ -178,6 +294,49 @@ func createEngineContainer(mlDep *machinelearningv1.SeldonDeployment, p *machine }, Resources: *engineResources, } +} + +// Create the Container for the service orchestrator. 
+func createEngineContainer(mlDep *machinelearningv1.SeldonDeployment, p *machinelearningv1.PredictorSpec, engine_http_port, engine_grpc_port int) (*corev1.Container, error) { + // Get engine user + engineUser, err := getSvcOrchUser(mlDep) + if err != nil { + return nil, err + } + // get predictor as base64 encoded json + pCopy := p.DeepCopy() + // Set traffic to zero to ensure this doesn't cause a diff in the resulting deployment created + pCopy.Traffic = 0 + predictorB64, err := getEngineVarJson(pCopy) + if err != nil { + return nil, err + } + + //Engine resources + engineResources := p.SvcOrchSpec.Resources + if engineResources == nil { + cpuQuantity, _ := resource.ParseQuantity("0.1") + engineResources = &corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: cpuQuantity, + }, + } + } + + var c corev1.Container + if isExecutorEnabled(mlDep) { + executor_http_port, err := getExecutorHttpPort() + if err != nil { + return nil, err + } + executor_grpc_port, err := getExecutorGrpcPort() + if err != nil { + return nil, err + } + c = createExecutorContainer(mlDep, p, predictorB64, executor_http_port, executor_grpc_port, engineResources) + } else { + c = createEngineContainerSpec(mlDep, p, predictorB64, engine_http_port, engine_grpc_port, engineResources) + } if engineUser != -1 { c.SecurityContext = &corev1.SecurityContext{RunAsUser: &engineUser} @@ -240,7 +399,7 @@ func createEngineDeployment(mlDep *machinelearningv1.SeldonDeployment, p *machin ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{machinelearningv1.Label_seldon_app: seldonId, machinelearningv1.Label_seldon_id: seldonId, "app": depName}, Annotations: map[string]string{ - "prometheus.io/path": GetEnv("ENGINE_PROMETHEUS_PATH", "/prometheus"), + "prometheus.io/path": getPrometheusPath(mlDep), "prometheus.io/port": strconv.Itoa(engine_http_port), "prometheus.io/scrape": "true", }, @@ -267,13 +426,9 @@ func createEngineDeployment(mlDep 
*machinelearningv1.SeldonDeployment, p *machin } // Add a particular service account rather than default for the engine - if svcAccount, ok := os.LookupEnv("ENGINE_CONTAINER_SERVICE_ACCOUNT_NAME"); ok { - deploy.Spec.Template.Spec.ServiceAccountName = svcAccount - deploy.Spec.Template.Spec.DeprecatedServiceAccount = svcAccount - } else { - deploy.Spec.Template.Spec.ServiceAccountName = "default" - deploy.Spec.Template.Spec.DeprecatedServiceAccount = "default" - } + svcAccountName := getSvcOrchSvcAccountName(mlDep) + deploy.Spec.Template.Spec.ServiceAccountName = svcAccountName + deploy.Spec.Template.Spec.DeprecatedServiceAccount = svcAccountName // add predictor labels for k, v := range p.Labels { diff --git a/operator/controllers/seldondeployment_prepackaged_servers.go b/operator/controllers/seldondeployment_prepackaged_servers.go index 42e49a1fae..69bd7270d0 100644 --- a/operator/controllers/seldondeployment_prepackaged_servers.go +++ b/operator/controllers/seldondeployment_prepackaged_servers.go @@ -23,83 +23,88 @@ import ( "github.com/seldonio/seldon-core/operator/utils" appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" + "strconv" "strings" ) -func addTFServerContainer(r *SeldonDeploymentReconciler, pu *machinelearningv1.PredictiveUnit, p *machinelearningv1.PredictorSpec, deploy *appsv1.Deployment, serverConfig machinelearningv1.PredictorServerConfig) error { +func createTensorflowServingContainer(pu *machinelearningv1.PredictiveUnit, usePUPorts bool) *v1.Container { + ServerConfig := machinelearningv1.GetPrepackServerConfig(string(*pu.Implementation)) - if len(*pu.Implementation) > 0 && (serverConfig.Tensorflow || serverConfig.TensorflowImage != "") { + tfImage := "tensorflow/serving:latest" - ty := machinelearningv1.MODEL - pu.Type = &ty + if ServerConfig.TensorflowImage != "" { + tfImage = ServerConfig.TensorflowImage + } - if pu.Endpoint == nil { - pu.Endpoint = &machinelearningv1.Endpoint{Type: machinelearningv1.REST} + grpcPort := 
int32(constants.TfServingGrpcPort) + restPort := int32(constants.TfServingRestPort) + name := constants.TFServingContainerName + if usePUPorts { + if pu.Endpoint.Type == machinelearningv1.GRPC { + grpcPort = pu.Endpoint.ServicePort + } else { + restPort = pu.Endpoint.ServicePort } + name = pu.Name + } - c := utils.GetContainerForDeployment(deploy, pu.Name) - existing := c != nil - if !existing { - c = &v1.Container{ - Name: pu.Name, - VolumeMounts: []v1.VolumeMount{ - { - Name: machinelearningv1.PODINFO_VOLUME_NAME, - MountPath: machinelearningv1.PODINFO_VOLUME_PATH, - }, - }, - } - } + return &v1.Container{ + Name: name, + Image: tfImage, + Args: []string{ + "/usr/bin/tensorflow_model_server", + constants.TfServingArgPort + strconv.Itoa(int(grpcPort)), + constants.TfServingArgRestPort + strconv.Itoa(int(restPort)), + "--model_name=" + pu.Name, + "--model_base_path=" + DefaultModelLocalMountPath}, + ImagePullPolicy: v1.PullIfNotPresent, + Ports: []v1.ContainerPort{ + { + ContainerPort: grpcPort, + Protocol: v1.ProtocolTCP, + }, + { + ContainerPort: restPort, + Protocol: v1.ProtocolTCP, + }, + }, + } +} - //Add missing fields - machinelearningv1.SetImageNameForPrepackContainer(pu, c) - SetUriParamsForTFServingProxyContainer(pu, c) +func addTFServerContainer(r *SeldonDeploymentReconciler, pu *machinelearningv1.PredictiveUnit, p *machinelearningv1.PredictorSpec, deploy *appsv1.Deployment, serverConfig machinelearningv1.PredictorServerConfig) error { - // Add container to deployment - if !existing { - if len(deploy.Spec.Template.Spec.Containers) > 0 { - deploy.Spec.Template.Spec.Containers = append(deploy.Spec.Template.Spec.Containers, *c) - } else { - deploy.Spec.Template.Spec.Containers = []v1.Container{*c} - } - } + if len(*pu.Implementation) > 0 && (serverConfig.Tensorflow || serverConfig.TensorflowImage != "") { - tfServingContainer := utils.GetContainerForDeployment(deploy, constants.TFServingContainerName) - existing = tfServingContainer != nil - if !existing { 
- ServerConfig := machinelearningv1.GetPrepackServerConfig(string(*pu.Implementation)) + ty := machinelearningv1.MODEL + pu.Type = &ty - tfImage := "tensorflow/serving:latest" + c := utils.GetContainerForDeployment(deploy, pu.Name) - if ServerConfig.TensorflowImage != "" { - tfImage = ServerConfig.TensorflowImage + var tfServingContainer *v1.Container + if p.Protocol == machinelearningv1.ProtocolTensorflow { + tfServingContainer = createTensorflowServingContainer(pu, true) + containers := make([]v1.Container, len(deploy.Spec.Template.Spec.Containers)) + for i, ctmp := range deploy.Spec.Template.Spec.Containers { + if ctmp.Name == pu.Name { + containers[i] = *tfServingContainer + } else { + containers[i] = ctmp + } } + deploy.Spec.Template.Spec.Containers = containers - tfServingContainer = &v1.Container{ - Name: constants.TFServingContainerName, - Image: tfImage, - Args: []string{ - "/usr/bin/tensorflow_model_server", - "--port=2000", - "--rest_api_port=2001", - "--model_name=" + pu.Name, - "--model_base_path=" + DefaultModelLocalMountPath}, - ImagePullPolicy: v1.PullIfNotPresent, - Ports: []v1.ContainerPort{ - { - ContainerPort: 2000, - Protocol: v1.ProtocolTCP, - }, - { - ContainerPort: 2001, - Protocol: v1.ProtocolTCP, - }, - }, + } else { + //Add missing fields + machinelearningv1.SetImageNameForPrepackContainer(pu, c) + SetUriParamsForTFServingProxyContainer(pu, c) + + tfServingContainer = utils.GetContainerForDeployment(deploy, constants.TFServingContainerName) + existing := tfServingContainer != nil + if !existing { + tfServingContainer = createTensorflowServingContainer(pu, false) + deploy.Spec.Template.Spec.Containers = append(deploy.Spec.Template.Spec.Containers, *tfServingContainer) } - } - if !existing { - deploy.Spec.Template.Spec.Containers = append(deploy.Spec.Template.Spec.Containers, *tfServingContainer) } _, err := InjectModelInitializer(deploy, tfServingContainer.Name, pu.ModelURI, pu.ServiceAccountName, pu.EnvSecretRefName, r) diff --git 
a/operator/controllers/seldondeployment_prepackaged_servers_test.go b/operator/controllers/seldondeployment_prepackaged_servers_test.go index 9a621a62b2..3c452c65c5 100644 --- a/operator/controllers/seldondeployment_prepackaged_servers_test.go +++ b/operator/controllers/seldondeployment_prepackaged_servers_test.go @@ -5,24 +5,29 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" machinelearningv1 "github.com/seldonio/seldon-core/operator/apis/machinelearning/v1" + "github.com/seldonio/seldon-core/operator/constants" "github.com/seldonio/seldon-core/operator/utils" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "strconv" + "strings" "time" ) var _ = Describe("Create a prepacked sklearn server", func() { const timeout = time.Second * 30 const interval = time.Second * 1 + const name = "pp1" + const sdepName = "prepack1" By("Creating a resource") It("should create a resource with defaults", func() { Expect(k8sClient).NotTo(BeNil()) var modelType = machinelearningv1.MODEL - var impl = machinelearningv1.PredictiveUnitImplementation("SKLEARN_SERVER") + var impl = machinelearningv1.PredictiveUnitImplementation(constants.PrePackedServerSklearn) key := types.NamespacedName{ - Name: "prepack", + Name: sdepName, Namespace: "default", } instance := &machinelearningv1.SeldonDeployment{ @@ -31,7 +36,7 @@ var _ = Describe("Create a prepacked sklearn server", func() { Namespace: key.Namespace, }, Spec: machinelearningv1.SeldonDeploymentSpec{ - Name: "pp", + Name: name, Predictors: []machinelearningv1.PredictorSpec{ { Name: "p1", @@ -65,7 +70,7 @@ var _ = Describe("Create a prepacked sklearn server", func() { err := k8sClient.Get(context.Background(), key, fetched) return err }, timeout, interval).Should(BeNil()) - Expect(fetched.Spec.Name).Should(Equal("pp")) + Expect(fetched.Spec.Name).Should(Equal(name)) sPodSpec := 
utils.GetSeldonPodSpecForPredictiveUnit(&instance.Spec.Predictors[0], instance.Spec.Predictors[0].Graph.Name) depName := machinelearningv1.GetDeploymentName(instance, instance.Spec.Predictors[0], sPodSpec) @@ -84,3 +89,266 @@ var _ = Describe("Create a prepacked sklearn server", func() { }) }) + +var _ = Describe("Create a prepacked tfserving server for Seldon protocol and REST", func() { + const timeout = time.Second * 30 + const interval = time.Second * 1 + const name = "pp2" + const sdepName = "prepack2" + By("Creating a resource") + It("should create a resource with defaults", func() { + Expect(k8sClient).NotTo(BeNil()) + var modelType = machinelearningv1.MODEL + var impl = machinelearningv1.PredictiveUnitImplementation(constants.PrePackedServerTensorflow) + key := types.NamespacedName{ + Name: sdepName, + Namespace: "default", + } + instance := &machinelearningv1.SeldonDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Spec: machinelearningv1.SeldonDeploymentSpec{ + Name: name, + Predictors: []machinelearningv1.PredictorSpec{ + { + Name: name, + Graph: &machinelearningv1.PredictiveUnit{ + Name: "classifier", + Type: &modelType, + Implementation: &impl, + Endpoint: &machinelearningv1.Endpoint{Type: machinelearningv1.REST}, + }, + }, + }, + }, + } + + configMapName := types.NamespacedName{Name: "seldon-config", + Namespace: "seldon-system"} + + configResult := &corev1.ConfigMap{} + const timeout = time.Second * 30 + Eventually(func() error { return k8sClient.Get(context.TODO(), configMapName, configResult) }, timeout). 
+ Should(Succeed()) + + // Run Defaulter + instance.Default() + + Expect(k8sClient.Create(context.Background(), instance)).Should(Succeed()) + //time.Sleep(time.Second * 5) + + fetched := &machinelearningv1.SeldonDeployment{} + Eventually(func() error { + err := k8sClient.Get(context.Background(), key, fetched) + return err + }, timeout, interval).Should(BeNil()) + Expect(fetched.Spec.Name).Should(Equal(name)) + + sPodSpec := utils.GetSeldonPodSpecForPredictiveUnit(&instance.Spec.Predictors[0], instance.Spec.Predictors[0].Graph.Name) + depName := machinelearningv1.GetDeploymentName(instance, instance.Spec.Predictors[0], sPodSpec) + depKey := types.NamespacedName{ + Name: depName, + Namespace: "default", + } + depFetched := &appsv1.Deployment{} + Eventually(func() error { + err := k8sClient.Get(context.Background(), depKey, depFetched) + return err + }, timeout, interval).Should(BeNil()) + Expect(len(depFetched.Spec.Template.Spec.Containers)).Should(Equal(3)) + for _, c := range depFetched.Spec.Template.Spec.Containers { + if c.Name == constants.TFServingContainerName { + for _, arg := range c.Args { + if strings.Index(arg, constants.TfServingArgPort) == 0 { + Expect(arg).To(Equal(constants.TfServingArgPort + strconv.Itoa(constants.TfServingGrpcPort))) + } + if strings.Index(arg, constants.TfServingArgRestPort) == 0 { + Expect(arg).To(Equal(constants.TfServingArgRestPort + strconv.Itoa(constants.TfServingRestPort))) + } + } + } + } + + Expect(k8sClient.Delete(context.Background(), instance)).Should(Succeed()) + }) + +}) + +var _ = Describe("Create a prepacked tfserving server for tensorflow protocol and REST", func() { + const timeout = time.Second * 30 + const interval = time.Second * 1 + const name = "pp3" + const sdepName = "prepack3" + modelName := "classifier" + By("Creating a resource") + It("should create a resource with defaults", func() { + Expect(k8sClient).NotTo(BeNil()) + var modelType = machinelearningv1.MODEL + var impl = 
machinelearningv1.PredictiveUnitImplementation(constants.PrePackedServerTensorflow) + key := types.NamespacedName{ + Name: sdepName, + Namespace: "default", + } + instance := &machinelearningv1.SeldonDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Spec: machinelearningv1.SeldonDeploymentSpec{ + Name: name, + Predictors: []machinelearningv1.PredictorSpec{ + { + Name: "p1", + Protocol: machinelearningv1.ProtocolTensorflow, + Graph: &machinelearningv1.PredictiveUnit{ + Name: modelName, + Type: &modelType, + Implementation: &impl, + Endpoint: &machinelearningv1.Endpoint{Type: machinelearningv1.REST}, + }, + }, + }, + }, + } + + configMapName := types.NamespacedName{Name: "seldon-config", + Namespace: "seldon-system"} + + configResult := &corev1.ConfigMap{} + const timeout = time.Second * 30 + Eventually(func() error { return k8sClient.Get(context.TODO(), configMapName, configResult) }, timeout). + Should(Succeed()) + + // Run Defaulter + instance.Default() + + Expect(k8sClient.Create(context.Background(), instance)).Should(Succeed()) + //time.Sleep(time.Second * 5) + + fetched := &machinelearningv1.SeldonDeployment{} + Eventually(func() error { + err := k8sClient.Get(context.Background(), key, fetched) + return err + }, timeout, interval).Should(BeNil()) + Expect(fetched.Spec.Name).Should(Equal(name)) + + sPodSpec := utils.GetSeldonPodSpecForPredictiveUnit(&instance.Spec.Predictors[0], instance.Spec.Predictors[0].Graph.Name) + depName := machinelearningv1.GetDeploymentName(instance, instance.Spec.Predictors[0], sPodSpec) + depKey := types.NamespacedName{ + Name: depName, + Namespace: "default", + } + depFetched := &appsv1.Deployment{} + Eventually(func() error { + err := k8sClient.Get(context.Background(), depKey, depFetched) + return err + }, timeout, interval).Should(BeNil()) + Expect(len(depFetched.Spec.Template.Spec.Containers)).Should(Equal(2)) + for _, c := range depFetched.Spec.Template.Spec.Containers { + if 
c.Name == modelName { + for _, arg := range c.Args { + if strings.Index(arg, constants.TfServingArgPort) == 0 { + Expect(arg).To(Equal(constants.TfServingArgPort + strconv.Itoa(constants.TfServingGrpcPort))) + } + if strings.Index(arg, constants.TfServingArgRestPort) == 0 { + Expect(arg).To(Equal(constants.TfServingArgRestPort + strconv.Itoa(constants.FirstPortNumber))) + } + } + } + } + + Expect(k8sClient.Delete(context.Background(), instance)).Should(Succeed()) + }) + +}) + +var _ = Describe("Create a prepacked tfserving server for tensorflow protocol and grpc", func() { + const timeout = time.Second * 30 + const interval = time.Second * 1 + const name = "pp4" + const sdepName = "prepack4" + modelName := "classifier" + By("Creating a resource") + It("should create a resource with defaults", func() { + Expect(k8sClient).NotTo(BeNil()) + var modelType = machinelearningv1.MODEL + var impl = machinelearningv1.PredictiveUnitImplementation(constants.PrePackedServerTensorflow) + key := types.NamespacedName{ + Name: sdepName, + Namespace: "default", + } + instance := &machinelearningv1.SeldonDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Spec: machinelearningv1.SeldonDeploymentSpec{ + Name: name, + Predictors: []machinelearningv1.PredictorSpec{ + { + Name: "p1", + Protocol: machinelearningv1.ProtocolTensorflow, + Transport: machinelearningv1.TransportGrpc, + Graph: &machinelearningv1.PredictiveUnit{ + Name: modelName, + Type: &modelType, + Implementation: &impl, + Endpoint: &machinelearningv1.Endpoint{Type: machinelearningv1.GRPC}, + }, + }, + }, + }, + } + + configMapName := types.NamespacedName{Name: "seldon-config", + Namespace: "seldon-system"} + + configResult := &corev1.ConfigMap{} + const timeout = time.Second * 30 + Eventually(func() error { return k8sClient.Get(context.TODO(), configMapName, configResult) }, timeout). 
+ Should(Succeed()) + + // Run Defaulter + instance.Default() + + Expect(k8sClient.Create(context.Background(), instance)).Should(Succeed()) + //time.Sleep(time.Second * 5) + + fetched := &machinelearningv1.SeldonDeployment{} + Eventually(func() error { + err := k8sClient.Get(context.Background(), key, fetched) + return err + }, timeout, interval).Should(BeNil()) + Expect(fetched.Spec.Name).Should(Equal(name)) + + sPodSpec := utils.GetSeldonPodSpecForPredictiveUnit(&instance.Spec.Predictors[0], instance.Spec.Predictors[0].Graph.Name) + depName := machinelearningv1.GetDeploymentName(instance, instance.Spec.Predictors[0], sPodSpec) + depKey := types.NamespacedName{ + Name: depName, + Namespace: "default", + } + depFetched := &appsv1.Deployment{} + Eventually(func() error { + err := k8sClient.Get(context.Background(), depKey, depFetched) + return err + }, timeout, interval).Should(BeNil()) + Expect(len(depFetched.Spec.Template.Spec.Containers)).Should(Equal(2)) + for _, c := range depFetched.Spec.Template.Spec.Containers { + if c.Name == modelName { + for _, arg := range c.Args { + if strings.Index(arg, constants.TfServingArgPort) == 0 { + Expect(arg).To(Equal(constants.TfServingArgPort + strconv.Itoa(constants.FirstPortNumber))) + } + if strings.Index(arg, constants.TfServingArgRestPort) == 0 { + Expect(arg).To(Equal(constants.TfServingArgRestPort + strconv.Itoa(constants.TfServingRestPort))) + } + } + } + } + + Expect(k8sClient.Delete(context.Background(), instance)).Should(Succeed()) + }) + +}) diff --git a/operator/helm/split_resources.py b/operator/helm/split_resources.py index 338d9ad7c2..d592215b3c 100644 --- a/operator/helm/split_resources.py +++ b/operator/helm/split_resources.py @@ -36,8 +36,14 @@ "ISTIO_ENABLED":"istio.enabled", "ISTIO_GATEWAY":"istio.gateway", "ISTIO_TLS_MODE":"istio.tlsMode", - "PREDICTIVE_UNIT_SERVICE_PORT":"predictiveUnit.port" - + "PREDICTIVE_UNIT_SERVICE_PORT":"predictiveUnit.port", + "USE_EXECUTOR":"executor.enabled", + 
"EXECUTOR_SERVER_GRPC_PORT": "engine.grpc.port", + "EXECUTOR_CONTAINER_IMAGE_PULL_POLICY": "executor.image.pullPolicy", + "EXECUTOR_SERVER_PORT": "executor.port", + "EXECUTOR_PROMETHEUS_PATH": "executor.prometheus.path", + "EXECUTOR_CONTAINER_USER": "executor.user", + "EXECUTOR_CONTAINER_SERVICE_ACCOUNT_NAME": "executor.serviceAccount.name", } HELM_VALUES_IMAGE_PULL_POLICY = '{{ .Values.image.pullPolicy }}' @@ -90,8 +96,9 @@ def helm_release(value: str): if env["name"] in HELM_ENV_SUBST: env["value"] = helm_value(HELM_ENV_SUBST[env["name"]]) elif env["name"] == "ENGINE_CONTAINER_IMAGE_AND_VERSION": - env[ - "value"] = '{{ .Values.engine.image.registry }}/{{ .Values.engine.image.repository }}:{{ .Values.engine.image.tag }}' + env["value"] = '{{ .Values.engine.image.registry }}/{{ .Values.engine.image.repository }}:{{ .Values.engine.image.tag }}' + elif env["name"] == "EXECUTOR_CONTAINER_IMAGE_AND_VERSION": + env["value"] = '{{ .Values.executor.image.registry }}/{{ .Values.executor.image.repository }}:{{ .Values.executor.image.tag }}' elif env["name"] == "CONTROLLER_ID": env["value"] = "{{ .Values.controllerId }}" # Update webhook port diff --git a/python/seldon_core/seldon_methods.py b/python/seldon_core/seldon_methods.py index 259ec44c4a..a568f0c124 100644 --- a/python/seldon_core/seldon_methods.py +++ b/python/seldon_core/seldon_methods.py @@ -277,8 +277,8 @@ def route( def aggregate( - user_model: Any, request: prediction_pb2.SeldonMessageList -) -> prediction_pb2.SeldonMessage: + user_model: Any, request: Union[prediction_pb2.SeldonMessageList, List, Dict] +) -> Union[prediction_pb2.SeldonMessage, List, Dict]: """ Aggregate a list of payloads @@ -326,23 +326,25 @@ def aggregate( features_list = [] names_list = [] - if "seldonMessages" not in request or not isinstance( + if isinstance(request, list): + msgs = request + elif "seldonMessages" in request and isinstance( request["seldonMessages"], list ): + msgs = request["seldonMessages"] + else: raise 
SeldonMicroserviceException( f"Invalid request data type: {request}" ) - for msg in request["seldonMessages"]: + for msg in msgs: (features, meta, datadef, data_type) = extract_request_parts_json(msg) class_names = datadef["names"] if datadef and "names" in datadef else [] features_list.append(features) names_list.append(class_names) client_response = client_aggregate(user_model, features_list, names_list) - return construct_response_json( - user_model, False, request["seldonMessages"][0], client_response - ) + return construct_response_json(user_model, False, msgs[0], client_response) def health_status(user_model: Any) -> Union[prediction_pb2.SeldonMessage, List, Dict]: @@ -366,3 +368,21 @@ def health_status(user_model: Any) -> Union[prediction_pb2.SeldonMessage, List, client_response = client_health_status(user_model) return construct_response_json(user_model, False, {}, client_response) + + +def metadata(user_model: Any) -> Dict: + """ + Call the user model to get the model metadata + + Parameters + ---------- + user_model + User defined class instance + Returns + ------- + Model Metadata + """ + if hasattr(user_model, "metadata"): + return user_model.metadata() + else: + return {} diff --git a/python/seldon_core/user_model.py b/python/seldon_core/user_model.py index 607dc86dc5..2f02e187cd 100644 --- a/python/seldon_core/user_model.py +++ b/python/seldon_core/user_model.py @@ -109,6 +109,9 @@ def health_status(self) -> Union[np.ndarray, List, str, bytes]: def health_status_raw(self) -> prediction_pb2.SeldonMessage: raise SeldonNotImplementedError("health_raw is not implemented") + def metadata(self) -> Dict: + raise SeldonNotImplementedError("metadata is not implemented") + def client_custom_tags(user_model: SeldonComponent) -> Dict: """ diff --git a/python/seldon_core/wrapper.py b/python/seldon_core/wrapper.py index 969da7f724..6c28efe737 100644 --- a/python/seldon_core/wrapper.py +++ b/python/seldon_core/wrapper.py @@ -113,6 +113,13 @@ def HealthStatus(): 
logger.debug("REST Health Status Response: %s", response) return jsonify(response) + @app.route("/metadata", methods=["GET"]) + def Metadata(): + logger.debug("REST Metadata Request") + response = seldon_core.seldon_methods.metadata(user_model) + logger.debug("REST Metadata Response: %s", response) + return jsonify(response) + return app diff --git a/python/tests/test_combiner_microservice.py b/python/tests/test_combiner_microservice.py index 927c8a3802..27e843a165 100644 --- a/python/tests/test_combiner_microservice.py +++ b/python/tests/test_combiner_microservice.py @@ -1,4 +1,3 @@ -import pytest import json import numpy as np from google.protobuf import json_format @@ -81,7 +80,7 @@ class UserObjectBad(object): pass -def test_aggreate_ok(): +def test_aggreate_ok_seldon_messages(): user_object = UserObject() app = get_rest_microservice(user_object) client = app.test_client() @@ -96,6 +95,21 @@ def test_aggreate_ok(): assert j["data"]["ndarray"] == [1] +def test_aggreate_ok_list(): + user_object = UserObject() + app = get_rest_microservice(user_object) + client = app.test_client() + rv = client.get('/aggregate?json=[{"data":{"ndarray":[1]}}]') + print(rv) + j = json.loads(rv.data) + print(j) + assert rv.status_code == 200 + assert j["meta"]["tags"] == {"mytag": 1} + assert j["meta"]["metrics"][0]["key"] == user_object.metrics()[0]["key"] + assert j["meta"]["metrics"][0]["value"] == user_object.metrics()[0]["value"] + assert j["data"]["ndarray"] == [1] + + def test_aggreate_bad_user_object(): user_object = UserObjectBad() app = get_rest_microservice(user_object) diff --git a/python/tests/test_model_microservice.py b/python/tests/test_model_microservice.py index a55f745c70..d66045d626 100644 --- a/python/tests/test_model_microservice.py +++ b/python/tests/test_model_microservice.py @@ -20,6 +20,7 @@ HEALTH_PING_URL = "/health/ping" HEALTH_STATUS_URL = "/health/status" +METADATA_URL = "/metadata" """ Checksum of bytes. 
Used to check data integrity of binData passed in multipart/form-data request @@ -41,6 +42,7 @@ def rs232_checksum(the_bytes): class UserObject(SeldonComponent): HEALTH_STATUS_REPONSE = [0.123] + METADATA_RESPONSE = {"metadata": {"name": "mymodel"}} def __init__(self, metrics_ok=True, ret_nparray=False, ret_meta=False): self.metrics_ok = metrics_ok @@ -84,6 +86,9 @@ def metrics(self): def health_status(self): return self.predict(self.HEALTH_STATUS_REPONSE, ["some_float"]) + def metadata(self): + return self.METADATA_RESPONSE + class UserObjectLowLevel(SeldonComponent): HEALTH_STATUS_RAW_RESPONSE = [123.456, 7.89] @@ -645,6 +650,17 @@ def test_model_health_status_raw(): assert j["data"]["ndarray"] == UserObjectLowLevel.HEALTH_STATUS_RAW_RESPONSE +def test_model_metadata(): + user_object = UserObject() + app = get_rest_microservice(user_object) + client = app.test_client() + rv = client.get(METADATA_URL) + assert rv.status_code == 200 + j = json.loads(rv.data) + print(j) + assert j == UserObject.METADATA_RESPONSE + + def test_proto_ok(): user_object = UserObject() app = SeldonModelGRPC(user_object) diff --git a/servers/tfserving/samples/halfplustwo_rest.yaml b/servers/tfserving/samples/halfplustwo_rest.yaml new file mode 100644 index 0000000000..5181e8ff76 --- /dev/null +++ b/servers/tfserving/samples/halfplustwo_rest.yaml @@ -0,0 +1,21 @@ +apiVersion: machinelearning.seldon.io/v1alpha2 +kind: SeldonDeployment +metadata: + name: hpt +spec: + name: hpt + predictors: + - graph: + children: [] + implementation: TENSORFLOW_SERVER + modelUri: gs://seldon-models/tfserving/half_plus_two + name: halfplustwo + parameters: + - name: model_name + type: STRING + value: halfplustwo + name: default + replicas: 1 + protocol: tensorflow + transport: rest + diff --git a/testing/docker/fixed-model/Makefile b/testing/docker/fixed-model/Makefile index 39057aca26..b56b5b6184 100644 --- a/testing/docker/fixed-model/Makefile +++ b/testing/docker/fixed-model/Makefile @@ -1,10 +1,14 @@ 
build_images: - s2i build -E environment_rest_v1 . seldonio/seldon-core-s2i-python3:0.3 seldonio/fixed-model:0.1 - s2i build -E environment_rest_v2 . seldonio/seldon-core-s2i-python3:0.3 seldonio/fixed-model:0.2 + s2i build -E environment_rest_v1 . seldonio/seldon-core-s2i-python3:0.15 seldonio/fixed-model:0.1 + s2i build -E environment_rest_v2 . seldonio/seldon-core-s2i-python3:0.15 seldonio/fixed-model:0.2 push_images: docker push seldonio/fixed-model:0.1 docker push seldonio/fixed-model:0.2 + +kind_load_images: build_images + kind load -v 3 docker-image seldonio/fixed-model:0.1 + kind load -v 3 docker-image seldonio/fixed-model:0.2 diff --git a/testing/scripts/Makefile b/testing/scripts/Makefile index 0c8773a1ac..8d97fa3aa0 100644 --- a/testing/scripts/Makefile +++ b/testing/scripts/Makefile @@ -16,7 +16,13 @@ kind_build_engine: kind_build_operator: cd ../../operator && make kind-image-install -kind_build_images: build_protos kind_build_engine kind_build_operator +kind_build_executor: + cd ../../executor && make kind-image-install + +kind_build_fixed_models: + cd ../docker/fixed-model && make kind_load_images + +kind_build_images: build_protos kind_build_engine kind_build_operator kind_build_executor kind_build_fixed_models helm_setup: helm repo add stable https://kubernetes-charts.storage.googleapis.com/ @@ -32,7 +38,7 @@ install_cert_manager: install_seldon: install_cert_manager kubectl create namespace seldon-system || echo "namespace seldon-system exists" - helm install --wait seldon ../../helm-charts/seldon-core-operator --namespace seldon-system --set istio.enabled=true --set istio.gateway=seldon-gateway --set certManager.enabled=false + helm install --wait seldon ../../helm-charts/seldon-core-operator --namespace seldon-system --set istio.enabled=true --set istio.gateway=seldon-gateway --set certManager.enabled=false --set executor.enabled=true install_istio: kubectl apply -f istio-1.4.2.yaml diff --git a/testing/scripts/kind_test_all.sh 
b/testing/scripts/kind_test_all.sh index f36cd2e965..1329ec1268 100755 --- a/testing/scripts/kind_test_all.sh +++ b/testing/scripts/kind_test_all.sh @@ -67,6 +67,19 @@ if [[ ${KIND_EXIT_VALUE} -eq 0 ]]; then echo "SKIPPING ENGINE IMAGE BUILD..." fi + echo "Files changed in executor folder:" + git --no-pager diff --exit-code --name-only origin/master ../../executor + EXECUTOR_MODIFIED=$? + if [[ $EXECUTOR_MODIFIED -gt 0 ]]; then + make kind_build_executor + EXECUTOR_EXIT_VALUE=$? + else + echo "SKIPPING EXECUTOR IMAGE BUILD..." + fi + + echo "Build fixed models" + make kind_build_fixed_models + # KIND CLUSTER SETUP make kind_setup SETUP_EXIT_VALUE=$? diff --git a/testing/scripts/seldon_e2e_utils.py b/testing/scripts/seldon_e2e_utils.py index 5f5cdd9d39..197691d986 100644 --- a/testing/scripts/seldon_e2e_utils.py +++ b/testing/scripts/seldon_e2e_utils.py @@ -190,6 +190,62 @@ def initial_rest_request( return r +def initial_grpc_request( + model, + namespace, + endpoint=API_AMBASSADOR, + data_size=5, + rows=1, + data=None, + dtype="tensor", + names=None, +): + try: + return grpc_request_ambassador( + model, + namespace, + endpoint=endpoint, + data_size=data_size, + rows=rows, + data=data, + ) + except: + logging.warning("Sleeping 1 sec and trying again") + time.sleep(1) + try: + return grpc_request_ambassador( + model, + namespace, + endpoint=endpoint, + data_size=data_size, + rows=rows, + data=data, + ) + except: + logging.warning("Sleeping 5 sec and trying again") + time.sleep(5) + try: + return grpc_request_ambassador( + model, + namespace, + endpoint=endpoint, + data_size=data_size, + rows=rows, + data=data, + ) + except: + logging.warning("Sleeping 10 sec and trying again") + time.sleep(10) + return grpc_request_ambassador( + model, + namespace, + endpoint=endpoint, + data_size=data_size, + rows=rows, + data=data, + ) + + def create_random_data(data_size, rows=1): shape = [rows, data_size] arr = np.random.rand(rows * data_size) @@ -301,11 +357,6 @@ def 
rest_request_ambassador_auth( return response -@retry( - wait_exponential_multiplier=1000, - wait_exponential_max=10000, - stop_max_attempt_number=5, -) def grpc_request_ambassador( deployment_name, namespace, diff --git a/testing/scripts/test_api_version.py b/testing/scripts/test_api_version.py index 182e0f9573..81550da746 100644 --- a/testing/scripts/test_api_version.py +++ b/testing/scripts/test_api_version.py @@ -20,8 +20,6 @@ def test_api_version(namespace, apiVersion): command = ( "helm install mymodel ../../helm-charts/seldon-single-model " - "--set oauth.key=oauth-key " - "--set oauth.secret=oauth-secret " f"--set apiVersion={apiVersion} " f"--namespace {namespace}" ) diff --git a/testing/scripts/test_helm_charts_clusterwide.py b/testing/scripts/test_helm_charts_clusterwide.py index db3274b5e5..bc0cb0b866 100644 --- a/testing/scripts/test_helm_charts_clusterwide.py +++ b/testing/scripts/test_helm_charts_clusterwide.py @@ -2,12 +2,14 @@ wait_for_rollout, wait_for_status, initial_rest_request, + initial_grpc_request, rest_request_ambassador, grpc_request_ambassador2, API_AMBASSADOR, ) from subprocess import run import logging +import time class TestClusterWide(object): @@ -15,7 +17,7 @@ class TestClusterWide(object): # Test singe model helm script with 4 API methods def test_single_model(self, namespace): run( - f"helm install mymodel ../../helm-charts/seldon-single-model --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace {namespace}", + f"helm install mymodel ../../helm-charts/seldon-single-model --namespace {namespace}", shell=True, check=True, ) @@ -27,15 +29,12 @@ def test_single_model(self, namespace): logging.warning(r.json()) assert r.status_code == 200 assert len(r.json()["data"]["tensor"]["values"]) == 1 - logging.warning("Test Ambassador gRPC gateway") - r = grpc_request_ambassador2("mymodel", namespace, API_AMBASSADOR) - logging.warning(r) run(f"helm delete mymodel", shell=True) # Test AB Test model helm script with 4 API methods 
def test_abtest_model(self, namespace): run( - f"helm install myabtest ../../helm-charts/seldon-abtest --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace {namespace}", + f"helm install myabtest ../../helm-charts/seldon-abtest --namespace {namespace}", shell=True, check=True, ) @@ -56,7 +55,7 @@ def test_abtest_model(self, namespace): # Test MAB Test model helm script with 4 API methods def test_mab_model(self, namespace): run( - f"helm install mymab ../../helm-charts/seldon-mab --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace {namespace}", + f"helm install mymab ../../helm-charts/seldon-mab --namespace {namespace}", shell=True, check=True, ) diff --git a/testing/scripts/test_local_operators.py b/testing/scripts/test_local_operators.py index 3fbca340a2..def590e053 100644 --- a/testing/scripts/test_local_operators.py +++ b/testing/scripts/test_local_operators.py @@ -13,7 +13,7 @@ class TestLocalOperators(object): def test_namespace_operator(self, namespace): retry_run( - f"helm install seldon ../../helm-charts/seldon-core-operator --namespace {namespace} --set istio.enabled=true --set istio.gateway=seldon-gateway --set certManager.enabled=false --set crd.create=false --set singleNamespace=true" + f"helm install seldon ../../helm-charts/seldon-core-operator --namespace {namespace} --set executor.enabled=true --set istio.enabled=true --set istio.gateway=seldon-gateway --set certManager.enabled=false --set crd.create=false --set singleNamespace=true" ) retry_run(f"kubectl apply -f ../resources/graph1.json -n {namespace}") wait_for_status("mymodel", namespace) @@ -28,7 +28,7 @@ def test_namespace_operator(self, namespace): def test_labelled_operator(self, namespace): retry_run( - f"helm install seldon ../../helm-charts/seldon-core-operator --namespace {namespace} --set istio.enabled=true --set istio.gateway=seldon-gateway --set certManager.enabled=false --set crd.create=false --set controllerId=seldon-id1" + f"helm install seldon 
../../helm-charts/seldon-core-operator --namespace {namespace} --set executor.enabled=true --set istio.enabled=true --set istio.gateway=seldon-gateway --set certManager.enabled=false --set crd.create=false --set controllerId=seldon-id1" ) retry_run( f"kubectl apply -f ../resources/model_controller_id.yaml -n {namespace}" diff --git a/testing/scripts/test_rolling_updates.py b/testing/scripts/test_rolling_updates.py index 43f8c9fcf5..c2a7b74546 100644 --- a/testing/scripts/test_rolling_updates.py +++ b/testing/scripts/test_rolling_updates.py @@ -47,14 +47,8 @@ def test_rolling_update1(self, namespace, api_gateway): r = rest_request_ambassador("mymodel", namespace, api_gateway) assert r.status_code == 200 res = r.json() - assert ( - res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - ) or ( - res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.2" - and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or ( + res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] ) if (not r.status_code == 200) or ( res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] @@ -89,15 +83,8 @@ def test_rolling_update2(self, namespace, api_gateway): r = rest_request_ambassador("mymodel", namespace, api_gateway) assert r.status_code == 200 res = r.json() - assert ( - "complex-model" in res["meta"]["requestPath"] - and res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - ) or ( - res["meta"]["requestPath"]["complex-model2"] - == "seldonio/fixed-model:0.2" - and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or ( + res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] ) if (not r.status_code == 200) or ( res["data"]["tensor"]["values"] == [5.0, 6.0, 
7.0, 8.0] @@ -132,14 +119,7 @@ def test_rolling_update3(self, namespace, api_gateway): r = rest_request_ambassador("mymodel", namespace, api_gateway) assert r.status_code == 200 res = r.json() - assert res["meta"]["requestPath"][ - "complex-model" - ] == "seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [ - 1.0, - 2.0, - 3.0, - 4.0, - ] + assert res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] time.sleep(1) assert i == 49 logging.warning("Success for test_rolling_update3") @@ -169,20 +149,9 @@ def test_rolling_update4(self, namespace, api_gateway): r = rest_request_ambassador("mymodel", namespace, api_gateway) assert r.status_code == 200 res = r.json() - assert ( - "complex-model" in res["meta"]["requestPath"] - and res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - ) or ( - res["meta"]["requestPath"]["model1"] == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - and res["meta"]["requestPath"]["model2"] == "seldonio/fixed-model:0.1" - ) - if (not r.status_code == 200) or ("model1" in res["meta"]["requestPath"]): - break + assert res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] time.sleep(1) - assert i < 100 + assert i == 49 logging.warning("Success for test_rolling_update4") run(f"kubectl delete -f ../resources/graph1.json -n {namespace}", shell=True) run(f"kubectl delete -f ../resources/graph5.json -n {namespace}", shell=True) @@ -210,15 +179,8 @@ def test_rolling_update5(self, namespace, api_gateway): r = rest_request_ambassador("mymodel", namespace, api_gateway) assert r.status_code == 200 res = r.json() - assert ( - "complex-model" in res["meta"]["requestPath"] - and res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - ) or ( - res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.2" - and 
res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or ( + res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] ) if (not r.status_code == 200) or ( res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] @@ -253,14 +215,8 @@ def test_rolling_update6(self, namespace, api_gateway): r = rest_request_ambassador("mymodel", namespace, api_gateway) assert r.status_code == 200 res = r.json() - assert ( - res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - ) or ( - res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.2" - and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or ( + res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] ) if (not r.status_code == 200) or ( res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] @@ -295,15 +251,8 @@ def test_rolling_update7(self, namespace, api_gateway): r = rest_request_ambassador("mymodel", namespace, api_gateway) assert r.status_code == 200 res = r.json() - assert ( - "complex-model" in res["meta"]["requestPath"] - and res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - ) or ( - res["meta"]["requestPath"]["complex-model2"] - == "seldonio/fixed-model:0.2" - and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or ( + res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] ) if (not r.status_code == 200) or ( res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] @@ -337,14 +286,7 @@ def test_rolling_update8(self, namespace, api_gateway): r = rest_request_ambassador("mymodel", namespace, api_gateway) assert r.status_code == 200 res = r.json() - assert res["meta"]["requestPath"][ - "complex-model" - ] == 
"seldonio/fixed-model:0.1" and res["data"]["tensor"]["values"] == [ - 1.0, - 2.0, - 3.0, - 4.0, - ] + assert res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] time.sleep(1) assert i == 49 logging.warning("Success for test_rolling_update8") @@ -373,20 +315,9 @@ def test_rolling_update9(self, namespace, api_gateway): r = rest_request_ambassador("mymodel", namespace, api_gateway) assert r.status_code == 200 res = r.json() - assert ( - "complex-model" in res["meta"]["requestPath"] - and res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - ) or ( - res["meta"]["requestPath"]["model1"] == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - and res["meta"]["requestPath"]["model2"] == "seldonio/fixed-model:0.1" - ) - if (not r.status_code == 200) or ("model1" in res["meta"]["requestPath"]): - break + assert res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] time.sleep(1) - assert i < 100 + assert i == 49 logging.warning("Success for test_rolling_update9") run(f"kubectl delete -f ../resources/graph1svc.json -n {namespace}", shell=True) run(f"kubectl delete -f ../resources/graph5svc.json -n {namespace}", shell=True) @@ -413,15 +344,8 @@ def test_rolling_update10(self, namespace, api_gateway): r = rest_request_ambassador("mymodel", namespace, api_gateway) assert r.status_code == 200 res = r.json() - assert ( - "complex-model" in res["meta"]["requestPath"] - and res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - ) or ( - res["meta"]["requestPath"]["complex-model"] - == "seldonio/fixed-model:0.2" - and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or ( + res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] ) if (not r.status_code == 200) or ( res["data"]["tensor"]["values"] == 
[5.0, 6.0, 7.0, 8.0] @@ -463,12 +387,8 @@ def test_rolling_update_deployment(namespace, from_deployment, to_deployment): r = rest_request_ambassador("mymodel", namespace, API_AMBASSADOR) assert r.status_code == 200 res = r.json() - assert ( - res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.1" - and res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0] - ) or ( - res["meta"]["requestPath"]["complex-model"] == "seldonio/fixed-model:0.2" - and res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] + assert (res["data"]["tensor"]["values"] == [1.0, 2.0, 3.0, 4.0]) or ( + res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] ) if (not r.status_code == 200) or ( res["data"]["tensor"]["values"] == [5.0, 6.0, 7.0, 8.0] diff --git a/testing/scripts/test_s2i_python.py b/testing/scripts/test_s2i_python.py index ab1c86cb1e..c9afdef6e4 100644 --- a/testing/scripts/test_s2i_python.py +++ b/testing/scripts/test_s2i_python.py @@ -28,7 +28,7 @@ def create_s2I_image(s2i_python_version, component_type, api_type): def kind_push_s2i_image(component_type, api_type): img = get_image_name(component_type, api_type) - cmd = "kind load docker-image " + img + " --loglevel trace" + cmd = "kind load docker-image " + img logging.warning(cmd) run(cmd, shell=True, check=True) @@ -145,11 +145,12 @@ def test_model_rest_non200(self, namespace, s2i_python_version): r = rest_request_ambassador("mymodel", namespace, API_AMBASSADOR, data=arr) res = r.json() logging.warning(res) - assert r.status_code == 200 - assert r.json()["status"]["code"] == 400 - assert r.json()["status"]["reason"] == "exception message" - assert r.json()["status"]["info"] == "exception caught" - assert r.json()["status"]["status"] == "FAILURE" + assert r.status_code == 500 + assert r.json()["status"]["code"] == 500 + assert ( + r.json()["status"]["info"] + == "Internal service call failed calling http://localhost:9000/predict status code 400" + ) run( f"kubectl delete -f 
../resources/s2i_python_model_non200.json -n {namespace}", shell=True,