diff --git a/docker/README.md b/docker/README.md index 7be3b0ab..ce139cd2 100644 --- a/docker/README.md +++ b/docker/README.md @@ -11,6 +11,8 @@ The built images are: * docker-pull - performs a 'docker pull' for action runtimes specified in runtimesManifest format -- used to prefetch action runtime images for invoker nodes + * invoker-agent - worker node invoker agent -- used to implement + suspend/resume and log consolidation ops for a remote invoker * openwhisk-catalog - installs the catalog from the project incubator-openwhisk-calalog to the system namespace of the OpenWhisk deployment. diff --git a/docker/invoker-agent/Dockerfile b/docker/invoker-agent/Dockerfile new file mode 100644 index 00000000..bee587ec --- /dev/null +++ b/docker/invoker-agent/Dockerfile @@ -0,0 +1,34 @@ +###### +# build-stage +###### +FROM golang:alpine AS build-env + +RUN apk add --no-cache curl git openssh + +# Build the invoker-agent executable +RUN mkdir -p /openwhisk/src/invoker-agent +COPY main.go /openwhisk/src/invoker-agent +ENV GOPATH=/openwhisk +RUN go get github.com/gorilla/mux +RUN go install invoker-agent + +# Get docker CLI for interactive debugging when running +ENV DOCKER_VERSION 1.12.0 +RUN curl -sSL -o docker-${DOCKER_VERSION}.tgz https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz && \ +tar --strip-components 1 -xvzf docker-${DOCKER_VERSION}.tgz -C /usr/bin docker/docker && \ +rm -f docker-${DOCKER_VERSION}.tgz && \ +chmod +x /usr/bin/docker + + +###### +# Final stage +###### +FROM alpine + +RUN mkdir -p /openwhisk/bin +COPY --from=build-env /openwhisk/bin/invoker-agent /openwhisk/bin/invoker-agent + +# For ease of debugging/inspection. Not needed by invoker-agent +COPY --from=build-env /usr/bin/docker /usr/bin/docker + +CMD ["/openwhisk/bin/invoker-agent"] diff --git a/docker/invoker-agent/main.go b/docker/invoker-agent/main.go new file mode 100644 index 00000000..e37e0bfe --- /dev/null +++ b/docker/invoker-agent/main.go @@ -0,0 +1,336 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "github.com/gorilla/mux" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "strconv" + "strings" + "time" +) + +/* JSON structure expected as request body on /logs route */ +type LogForwardInfo struct { + LastOffset int64 `json:"lastOffset"` // last offset read from this container's log + SizeLimit int `json:"sizeLimit"` // size limit on logs read in bytes + SentinelledLogs bool `json:"sentinelledLogs"` // does an action's log end with sentinel lines? 
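+	// (when SentinelledLogs is true, the /logs handler below stops copying once it has
+	// seen logSentinelLine twice; the two occurrences presumably mark the end of the
+	// action's stdout and stderr streams)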
+	EncodedLogLineMetadata string `json:"encodedLogLineMetadata"` // string to be injected in every log line
+	EncodedActivation      string `json:"encodedActivation"`      // extra line to be injected after all log lines are read
+}
+
+/* String constants related to logging */
+const (
+	logSentinelLine        = "XXX_THE_END_OF_A_WHISK_ACTIVATION_XXX"
+	truncatedLogMessage    = "Logs were truncated because the total bytes size exceeds the limit of %d bytes."
+	genericLogErrorMessage = "There was an issue while collecting your logs. Data might be missing."
+)
+
+/* Should we measure and report time taken for each operation? */
+const timeOps = false
+
+/* configuration variables; may be overridden by setting matching envvar */
+var (
+	dockerSock       string = "/var/run/docker.sock"
+	containerDir     string = "/containers"
+	outputLogDir     string = "/action-logs"
+	invokerAgentPort int    = 3233
+	logSinkSize      int64  = 100 * 1024 * 1024 // size threshold for individual output files written by the logWriter
+)
+
+/* http.Client instance bound to dockerSock */
+var client *http.Client
+
+/* channel to send log lines to the logWriter */
+var logSinkChannel chan string
+
+/*
+ * Support for writing log lines to the logSink
+ */
+
+// go routine that accepts log lines from the logSinkChannel and writes them to the logSink
+func logWriter() {
+	var sinkFile *os.File = nil
+	var sinkFileBytes int64 = 0
+	var err error
+
+	for {
+		line := <-logSinkChannel
+
+		if sinkFile == nil {
+			timestamp := time.Now().UnixNano() / 1000000
+			fname := fmt.Sprintf("%s/userlogs-%d.log", outputLogDir, timestamp)
+			sinkFile, err = os.Create(fname)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "Unable to create log sink: %v\n", err)
+				panic(err)
+			}
+			sinkFileBytes = 0
+		}
+
+		bytesWritten, err := fmt.Fprintln(sinkFile, line)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error writing to log sink: %v\n", err)
+			panic(err)
+		}
+
+		sinkFileBytes += int64(bytesWritten)
+		if sinkFileBytes > logSinkSize {
+			sinkFile.Close()
+			sinkFile = nil
+		}
+	}
+}
+
+func writeSyntheticLogLine(msg string, metadata string) {
+	now := time.Now().UTC().Format(time.RFC3339)
+	line := fmt.Sprintf("{\"log\":\"%s\", \"stream\":\"stderr\", \"time\":\"%s\",%s}", msg, now, metadata)
+	logSinkChannel <- line
+}
+
+func reportLoggingError(w http.ResponseWriter, code int, msg string, metadata string) {
+	w.WriteHeader(code)
+	fmt.Fprint(w, msg)
+	fmt.Fprintln(os.Stderr, msg)
+	if metadata != "" {
+		writeSyntheticLogLine(genericLogErrorMessage, metadata)
+	}
+}
+
+// Request handler for /logs/ route
+// The container was given as part of the URL; gorilla makes it available in vars["container"]
+// The JSON body of the request is expected to contain the fields specified by the
+// LogForwardInfo struct defined above.
+// If logs are successfully forwarded, the ending offset of the log file is returned
+// to be used in a subsequent call to the /logs/ route.
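+//
+// Illustrative request/response (the container id and field values below are
+// hypothetical, not taken from a real deployment):
+//
+//   POST http://<worker-node>:3233/logs/<containerId>
+//   {
+//     "lastOffset": 0,
+//     "sizeLimit": 1048576,
+//     "sentinelledLogs": true,
+//     "encodedLogLineMetadata": "\"activationId\":\"abc123\",\"namespace\":\"guest\"",
+//     "encodedActivation": "{\"log\":\"...activation record...\"}"
+//   }
+//
+// On success the response body is the new byte offset into the container's json log,
+// which the invoker passes back as lastOffset on its next /logs call for this container.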
+func forwardLogsFromUserAction(w http.ResponseWriter, r *http.Request) {
+	var start time.Time
+	if timeOps {
+		start = time.Now()
+	}
+
+	vars := mux.Vars(r)
+	container := vars["container"]
+
+	var lfi LogForwardInfo
+	b, err := ioutil.ReadAll(r.Body)
+	defer r.Body.Close()
+	if err != nil {
+		reportLoggingError(w, 400, fmt.Sprintf("Error reading request body: %v", err), "")
+		return
+	}
+	err = json.Unmarshal(b, &lfi)
+	if err != nil {
+		reportLoggingError(w, 400, fmt.Sprintf("Error unmarshalling request body: %v", err), "")
+		return
+	}
+
+	logFileName := containerDir + "/" + container + "/" + container + "-json.log"
+	logFile, err := os.Open(logFileName)
+	defer logFile.Close()
+	if err != nil {
+		reportLoggingError(w, 500, fmt.Sprintf("Error opening %s: %v", logFileName, err), lfi.EncodedLogLineMetadata)
+		logSinkChannel <- lfi.EncodedActivation // Write activation record before returning with error code.
+		return
+	}
+
+	offset, err := logFile.Seek(lfi.LastOffset, 0)
+	if offset != lfi.LastOffset || err != nil {
+		reportLoggingError(w, 500, fmt.Sprintf("Unable to seek to %d in log file", lfi.LastOffset), lfi.EncodedLogLineMetadata)
+		logSinkChannel <- lfi.EncodedActivation // Write activation record before returning with error code.
+		return
+	}
+
+	sentinelsLeft := 2
+	scanner := bufio.NewScanner(logFile)
+	bytesWritten := 0
+	for sentinelsLeft > 0 && scanner.Scan() {
+		logLine := scanner.Text()
+		if lfi.SentinelledLogs && strings.Contains(logLine, logSentinelLine) {
+			sentinelsLeft -= 1
+		} else {
+			logLineLen := len(logLine)
+			bytesWritten += logLineLen
+			mungedLine := fmt.Sprintf("%s,%s}", logLine[:logLineLen-1], lfi.EncodedLogLineMetadata)
+			logSinkChannel <- mungedLine
+			if bytesWritten > lfi.SizeLimit {
+				writeSyntheticLogLine(fmt.Sprintf(truncatedLogMessage, lfi.SizeLimit), lfi.EncodedLogLineMetadata)
+				logFile.Seek(0, 2) // Seek to end of logfile to skip rest of output and prepare for next action invoke
+				sentinelsLeft = 0  // Cause loop to exit now.
+			}
+		}
+	}
+
+	if lfi.SentinelledLogs && sentinelsLeft != 0 {
+		reportLoggingError(w, 500, "Failed to find expected sentinels in log file", lfi.EncodedLogLineMetadata)
+		logSinkChannel <- lfi.EncodedActivation // Write activation record before returning with error code.
+		return
+	}
+
+	// Done copying log; write the activation record.
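+	// The activation record is sent on the same channel as the log lines, so the
+	// logWriter appends it to the same sink file immediately after the logs it describes.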
+	logSinkChannel <- lfi.EncodedActivation
+
+	// seek 0 bytes from current position to set logFileOffset to current fpos
+	logFileOffset, err := logFile.Seek(0, 1)
+	if err != nil {
+		reportLoggingError(w, 500, fmt.Sprintf("Unable to determine current offset in log file: %v", err), lfi.EncodedLogLineMetadata)
+		return
+	}
+
+	// Success; return updated logFileOffset to invoker
+	w.WriteHeader(200)
+	fmt.Fprintf(w, "%d", logFileOffset)
+
+	if timeOps {
+		end := time.Now()
+		elapsed := end.Sub(start)
+		fmt.Fprintf(os.Stdout, "LogForward took %s\n", elapsed.String())
+	}
+}
+
+/*
+ * Support for suspend/resume operations
+ */
+
+// handler for /resume/ route
+// The container was given as part of the URL; gorilla makes it available in vars["container"]
+func resumeUserAction(w http.ResponseWriter, r *http.Request) {
+	var start time.Time
+	if timeOps {
+		start = time.Now()
+	}
+
+	vars := mux.Vars(r)
+	container := vars["container"]
+	dummy := strings.NewReader("")
+	resp, err := client.Post("http://localhost/containers/"+container+"/unpause", "text/plain", dummy)
+	if err != nil {
+		w.WriteHeader(500)
+		fmt.Fprintf(w, "Unpausing %s failed with error: %v\n", container, err)
+	} else if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		w.WriteHeader(500)
+		fmt.Fprintf(w, "Unpausing %s failed with status code: %d\n", container, resp.StatusCode)
+	} else {
+		w.WriteHeader(204) // success!
+	}
+
+	if timeOps {
+		end := time.Now()
+		elapsed := end.Sub(start)
+		fmt.Fprintf(os.Stdout, "Unpause took %s\n", elapsed.String())
+	}
+}
+
+// handler for /suspend/ route
+// The container was given as part of the URL; gorilla makes it available in vars["container"]
+func suspendUserAction(w http.ResponseWriter, r *http.Request) {
+	var start time.Time
+	if timeOps {
+		start = time.Now()
+	}
+
+	vars := mux.Vars(r)
+	container := vars["container"]
+	dummy := strings.NewReader("")
+	resp, err := client.Post("http://localhost/containers/"+container+"/pause", "text/plain", dummy)
+	if err != nil {
+		w.WriteHeader(500)
+		fmt.Fprintf(w, "Pausing %s failed with error: %v\n", container, err)
+	} else if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		w.WriteHeader(500)
+		fmt.Fprintf(w, "Pausing %s failed with status code: %d\n", container, resp.StatusCode)
+	} else {
+		w.WriteHeader(204) // success!
+	}
+
+	if timeOps {
+		end := time.Now()
+		elapsed := end.Sub(start)
+		fmt.Fprintf(os.Stdout, "Pause took %s\n", elapsed.String())
+	}
+}
+
+/*
+ * Initialization and main function
+ */
+
+// Process configuration overrides from environment
+func initializeFromEnv() {
+	var err error
+	if os.Getenv("INVOKER_AGENT_DOCKER_SOCK") != "" {
+		dockerSock = os.Getenv("INVOKER_AGENT_DOCKER_SOCK")
+	}
+	if os.Getenv("INVOKER_AGENT_CONTAINER_DIR") != "" {
+		containerDir = os.Getenv("INVOKER_AGENT_CONTAINER_DIR")
+	}
+	if os.Getenv("INVOKER_AGENT_OUTPUT_LOG_DIR") != "" {
+		outputLogDir = os.Getenv("INVOKER_AGENT_OUTPUT_LOG_DIR")
+	}
+	if os.Getenv("INVOKER_AGENT_PORT") != "" {
+		str := os.Getenv("INVOKER_AGENT_PORT")
+		invokerAgentPort, err = strconv.Atoi(str)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Invalid INVOKER_AGENT_PORT %s; error was %v\n", str, err)
+			panic(err)
+		}
+	}
+	if os.Getenv("INVOKER_AGENT_LOG_SINK_SIZE") != "" {
+		str := os.Getenv("INVOKER_AGENT_LOG_SINK_SIZE")
+		logSinkSize, err = strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Invalid INVOKER_AGENT_LOG_SINK_SIZE %s; error was %v\n", str, err)
+			panic(err)
+		}
+	}
+}
+
+func handleRequests() {
+	myRouter := mux.NewRouter().StrictSlash(true)
+	myRouter.HandleFunc("/logs/{container}", forwardLogsFromUserAction)
+	myRouter.HandleFunc("/suspend/{container}", suspendUserAction)
+	myRouter.HandleFunc("/resume/{container}", resumeUserAction)
+	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", invokerAgentPort), myRouter))
+}
+
+func main() {
+	initializeFromEnv()
+
+	// Open http client to dockerSock
+	fd := func(proto, addr string) (conn net.Conn, err error) {
+		return net.Dial("unix", dockerSock)
+	}
+	tr := &http.Transport{
+		Dial: fd,
+	}
+	client = &http.Client{Transport: tr}
+
+	// initialize logSink subsystem & start the logWriter go routine
+	logSinkChannel = make(chan string)
+	go logWriter()
+
+	handleRequests()
+}
diff --git a/kubernetes/invoker/README.md b/kubernetes/invoker/README.md
index 9e60c11a..cda564d3 100644
--- a/kubernetes/invoker/README.md
+++ b/kubernetes/invoker/README.md
@@ -1,45 +1,109 @@
 Invoker
 -------
-# Deploying
+# Overview
+
+The Invoker is responsible for creating and managing the containers
+that OpenWhisk uses to execute user-defined functions. A key
+function of the Invoker is to manage a cache of available warm
+containers to minimize cold starts of user functions.
+Architecturally, we support two options for deploying the Invoker
+component on Kubernetes (selected by picking a
+`ContainerFactoryProviderSPI` for your deployment).
+ 1. `DockerContainerFactory` matches the architecture used by the
+    non-Kubernetes deployments of OpenWhisk. In this approach, an
+    Invoker instance runs on every Kubernetes worker node that is
+    being used to execute user functions. The Invoker directly
+    communicates with the docker daemon running on the worker node
+    to create and manage the user function containers. The primary
+    advantages of this configuration are lower latency on container
+    management operations and robustness of the code paths being
+    used (since they are the same as in the default system). The
+    primary disadvantage is that it does not leverage Kubernetes to
+    simplify resource management, security configuration, etc. for
+    user containers.
+ 2. `KubernetesContainerFactory` is a truly Kubernetes-native design
+    where although the Invoker is still responsible for managing the
+    cache of available user containers, the Invoker relies on Kubernetes to
+    create, schedule, and manage the Pods that contain the user function
+    containers. The pros and cons of this design are roughly the
+    inverse of `DockerContainerFactory`. Kubernetes pod management
+    operations have higher latency and exercise newer code paths in
+    the Invoker. However, this design fully leverages Kubernetes to
+    manage the execution resources for user functions.
 
-## Create config map
+# Deploying
 
-Edit invoker.env as needed to set the appropriate values for your
-deployment, then create the configmap invoker.config:
+## Label the worker nodes
 
+In either approach, it is desirable to indicate which worker nodes
+should be used to execute user containers. Do this by labeling each
+node with `openwhisk-role=invoker`. For a single node cluster, simply do
+```
+kubectl label nodes --all openwhisk-role=invoker
+```
+If you have a multi-node cluster, for each node
+you want to be an invoker, execute
 ```
-kubectl -n openwhisk create cm invoker.config --from-env-file=invoker.env
+$ kubectl label nodes <node-name> openwhisk-role=invoker
 ```
 
-## Deploy Invoker
+## Deploying using the DockerContainerFactory
 
-When deploying the Invoker, it needs to be deployed via a
-[DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/).
-This is because there should only ever be at most 1 Invoker
-instance per Kube Node. To set these restrictions, it will be
-up to the Kubernetes deployment operator to properly apply
-the correct labels and taints to each required Kube node.
+### Create the invoker.config config map
 
-With the defaults in the current `invoker.yml`, you can setup a
-node to run only Invoker pods with:
+Edit invoker-dcf.env to make any customizations needed for your
+deployment, then create the config map:
+```
+kubectl -n openwhisk create cm invoker.config --from-env-file=invoker-dcf.env
+```
+
+### Deploy the Invoker as a DaemonSet
+
+This will deploy an Invoker instance on every Kubernetes worker node
+labeled with openwhisk-role=invoker.
 ```
-kubectl label nodes [node name] openwhisk-role=invoker
-$ kubectl label nodes 127.0.0.1 openwhisk-role=invoker
+kubectl apply -f invoker-dcf.yml
 ```
 
-Once the invoker label is applied, you can create the invokers with:
+## Deploying using the KubernetesContainerFactory
+
+The KubernetesContainerFactory can be deployed with an additional
+invokerAgent that implements container suspend/resume operations on
+behalf of a remote Invoker. The instructions here include deploying
+the invokerAgent. If you do not want to do this, skip deploying the
+invokerAgent daemonset and edit invoker-k8scf.yml to set
+`CONFIG_whisk_kubernetes_invokerAgent_enabled` to `FALSE`.
+
+### Create the invoker.config config map
+
+Edit invoker-k8scf.env to make any customizations needed for your
+deployment, then create the config map:
+```
+kubectl -n openwhisk create cm invoker.config --from-env-file=invoker-k8scf.env
+```
+
+### Deploy the invokerAgent Daemonset
 
 ```
-kubectl apply -f invoker.yml
+kubectl apply -f invoker-agent.yml
 ```
+Wait for all of the invoker-agent pods to be running. This might take a
+couple of minutes because the invoker-agent also prefetches the docker images
+for the default set of user action runtimes by doing docker pulls as an
+init container.
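+
+As a quick sanity check (illustrative only and not required; the label selector below
+matches the `name: invoker-agent` pod label from invoker-agent.yml), verify that an
+invoker-agent pod is running on each invoker node and is listening on host port 3233,
+where it serves the `/logs/{container}`, `/suspend/{container}`, and
+`/resume/{container}` routes used by the Invoker:
+```
+kubectl -n openwhisk get pods -l name=invoker-agent -o wide
+```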
-**Important** +### Deploy the Invoker as a StatefulSet + +By default, this will deploy a single Invoker instance. Optionally +edit invoker-k8scf.yml to change the number of Invoker replicas and +then do: +``` +kubectl apply -f invoker-k8scf.yml +``` # Troubleshooting -## No invokers are deployed +## No invokers are deployed with DockerContainerFactory Verify that you actually have at least one node with the label openwhisk-role=invoker. @@ -54,5 +118,5 @@ means that the default volume hostPath values assume that the Kubernetes worker node image is Ubuntu. If containers fail to start with errors related mounting`/sys/fs/cgroup`, `/run/runc`,`/var/lib/docker/containers`, or `/var/run/docker.sock`, then you will need to change the corresponding -value in [invoker.yml](invoker.yml) to match the host operating system +value in [invoker-dcf.yml](invoker-dcf.yml) to match the host operating system running on your Kubernetes worker node. diff --git a/kubernetes/invoker/invoker-agent.yml b/kubernetes/invoker/invoker-agent.yml new file mode 100644 index 00000000..80cdd746 --- /dev/null +++ b/kubernetes/invoker/invoker-agent.yml @@ -0,0 +1,104 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: invoker-agent + namespace: openwhisk + labels: + name: invoker-agent +spec: + template: + metadata: + labels: + name: invoker-agent + spec: + restartPolicy: Always + hostNetwork: true + + # run only on nodes labeled with openwhisk-role=invoker + # TODO: disabled affinity until user-action pods are + # created with the same affinity rules. + # Requires extension to upstream kube java client + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: openwhisk-role + # operator: In + # values: + # - invoker + + volumes: + - name: cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: runc + hostPath: + path: "/run/runc" + - name: dockerrootdir + hostPath: + path: "/var/lib/docker/containers" + - name: dockersock + hostPath: + path: "/var/run/docker.sock" + - name: userlogs + emptyDir: {} + + initContainers: + - name: docker-pull-runtimes + imagePullPolicy: Always + image: openwhisk/kube-docker-pull + volumeMounts: + - name: dockersock + mountPath: "/var/run/docker.sock" + env: + # action runtimes + - name: "RUNTIMES_MANIFEST" + valueFrom: + configMapKeyRef: + name: whisk.runtimes + key: runtimes + + containers: + - name: invoker-agent + imagePullPolicy: Always + image: openwhisk/kube-invoker-agent + securityContext: + privileged: true + ports: + # IANA port 3233 "whisker" for "WhiskerControl" ;) + - name: agent + containerPort: 3233 + hostPort: 3233 + volumeMounts: + - name: cgroup + mountPath: "/sys/fs/cgroup" + - name: runc + mountPath: "/run/runc" + - name: dockersock + mountPath: "/var/run/docker.sock" + - name: dockerrootdir + mountPath: "/containers" + - name: userlogs + mountPath: "/action-logs" + env: + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: invoker-agent-netpol + namespace: openwhisk +spec: + podSelector: + matchLabels: + name: invoker-agent + ingress: + # Allow invoker to connect to invoker-agent + - from: + - podSelector: + matchLabels: + name: invoker + ports: + - port: 3233 diff --git a/kubernetes/invoker/invoker.env b/kubernetes/invoker/invoker-dcf.env similarity index 60% rename from kubernetes/invoker/invoker.env rename to kubernetes/invoker/invoker-dcf.env index 75285b89..ac1346f6 100644 --- a/kubernetes/invoker/invoker.env +++ 
b/kubernetes/invoker/invoker-dcf.env @@ -1,4 +1,4 @@ -java_opts=-Xmx2g +java_opts=-Xmx2g -Dwhisk.spi.ContainerFactoryProvider=whisk.core.containerpool.docker.DockerContainerFactoryProvider invoker_opts= invoker_container_network=bridge invoker_container_dns= diff --git a/kubernetes/invoker/invoker.yml b/kubernetes/invoker/invoker-dcf.yml similarity index 100% rename from kubernetes/invoker/invoker.yml rename to kubernetes/invoker/invoker-dcf.yml diff --git a/kubernetes/invoker/invoker-k8scf.env b/kubernetes/invoker/invoker-k8scf.env new file mode 100644 index 00000000..2cd03bf9 --- /dev/null +++ b/kubernetes/invoker/invoker-k8scf.env @@ -0,0 +1,9 @@ +java_opts=-Xmx2g -Dkubernetes.master=https://$KUBERNETES_SERVICE_HOST -Dwhisk.spi.ContainerFactoryProvider=whisk.core.containerpool.kubernetes.KubernetesContainerFactoryProvider +invoker_opts= +invoker_container_network=bridge +invoker_container_dns= +invoker_use_runc=false +docker_image_prefix=openwhisk +docker_image_tag=latest +docker_registry= +invoker_logs_dir= diff --git a/kubernetes/invoker/invoker-k8scf.yml b/kubernetes/invoker/invoker-k8scf.yml new file mode 100644 index 00000000..7c5129c3 --- /dev/null +++ b/kubernetes/invoker/invoker-k8scf.yml @@ -0,0 +1,201 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: invoker + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + namespace: openwhisk + name: invoker +rules: +- apiGroups: ["extensions"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get", "list"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: invoker-rbac + namespace: openwhisk +subjects: +- kind: ServiceAccount + name: invoker + namespace: openwhisk +roleRef: + kind: Role + name: invoker + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: invoker + namespace: openwhisk + labels: + name: invoker +spec: + replicas: 1 + serviceName: invoker + template: + metadata: + labels: + name: invoker + spec: + serviceAccountName: invoker + restartPolicy: Always + + affinity: + # prefer to run on an invoker node (only prefer because of single node clusters) + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: openwhisk-role + operator: In + values: + - invoker + # do not allow more than 1 invoker instance to run on a node + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: name + operator: In + values: + - invoker + topologyKey: "kubernetes.io/hostname" + + containers: + - name: invoker + imagePullPolicy: Always + image: openwhisk/invoker + command: [ "/bin/bash", "-c", "COMPONENT_NAME=$(hostname | cut -d'-' -f2) /init.sh" ] + ports: + - name: invoker + containerPort: 8080 + env: + - name: "PORT" + value: "8080" + + # Invoker name is name of pod (invoker-0, invoker-1, etc). 
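+          # (The container command above derives the numeric COMPONENT_NAME by stripping
+          # the "invoker-" prefix from the pod's hostname via `cut -d'-' -f2`, while the
+          # downward-API fieldRef below hands the full pod name to the Invoker.)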
+ - name: "INVOKER_NAME" + valueFrom: + fieldRef: + fieldPath: metadata.name + + - name: "WHISK_API_HOST_NAME" + valueFrom: + configMapKeyRef: + name: whisk.ingress + key: api_host + + # Enable invoker-agent + - name: "CONFIG_whisk_kubernetes_invokerAgent_enabled" + value: "TRUE" + + # Docker-related options + - name: "INVOKER_USE_RUNC" + value: "FALSE" + - name: "DOCKER_IMAGE_PREFIX" + valueFrom: + configMapKeyRef: + name: invoker.config + key: docker_image_prefix + - name: "DOCKER_IMAGE_TAG" + valueFrom: + configMapKeyRef: + name: invoker.config + key: docker_image_tag + - name: "DOCKER_REGISTRY" + valueFrom: + configMapKeyRef: + name: invoker.config + key: docker_registry + + # action runtimes + - name: "RUNTIMES_MANIFEST" + valueFrom: + configMapKeyRef: + name: whisk.runtimes + key: runtimes + + # extra JVM arguments + - name: "JAVA_OPTS" + valueFrom: + configMapKeyRef: + name: invoker.config + key: java_opts + + # extra Invoker arguments + - name: "INVOKER_OPTS" + valueFrom: + configMapKeyRef: + name: invoker.config + key: invoker_opts + + # Recommend using "" because logs should go to stdout on kube + - name: "WHISK_LOGS_DIR" + valueFrom: + configMapKeyRef: + name: invoker.config + key: invoker_logs_dir + + # properties for Kafka connection + - name: "KAFKA_HOSTS" + value: "$(KAFKA_SERVICE_HOST):$(KAFKA_SERVICE_PORT_KAFKA)" + + # properties for zookeeper connection + - name: "ZOOKEEPER_HOSTS" + value: "$(ZOOKEEPER_SERVICE_HOST):$(ZOOKEEPER_SERVICE_PORT_ZOOKEEPER)" + + # properties for DB connection + - name: "CONFIG_whisk_couchdb_username" + valueFrom: + secretKeyRef: + name: db.auth + key: db_username + - name: "CONFIG_whisk_couchdb_password" + valueFrom: + secretKeyRef: + name: db.auth + key: db_password + - name: "CONFIG_whisk_couchdb_protocol" + valueFrom: + configMapKeyRef: + name: db.config + key: db_protocol + - name: "CONFIG_whisk_couchdb_host" + value: "$(COUCHDB_SERVICE_HOST)" + - name: "CONFIG_whisk_couchdb_port" + value: "$(COUCHDB_SERVICE_PORT_COUCHDB)" + - name: "CONFIG_whisk_couchdb_provider" + valueFrom: + configMapKeyRef: + name: db.config + key: db_provider + - name: "CONFIG_whisk_couchdb_databases_WhiskActivation" + valueFrom: + configMapKeyRef: + name: db.config + key: db_whisk_activations + - name: "CONFIG_whisk_couchdb_databases_WhiskEntity" + valueFrom: + configMapKeyRef: + name: db.config + key: db_whisk_actions + - name: "CONFIG_whisk_couchdb_databases_WhiskAuth" + valueFrom: + configMapKeyRef: + name: db.config + key: db_whisk_auths diff --git a/tools/travis/build.sh b/tools/travis/build.sh index 3920d80b..8feaceb8 100755 --- a/tools/travis/build.sh +++ b/tools/travis/build.sh @@ -137,7 +137,7 @@ ROOTDIR="$SCRIPTDIR/../../" cd $ROOTDIR -# Label invoker nodes (needed for daemonset-based invoker deployment) +# Label invoker nodes (needed for DockerContainerFactory-based invoker deployment) echo "Labeling invoker node" kubectl label nodes --all openwhisk-role=invoker kubectl describe nodes @@ -210,10 +210,10 @@ pushd kubernetes/controller popd # setup the invoker -echo "Deploying invoker" +echo "Deploying invoker using DockerContainerFactory" pushd kubernetes/invoker - kubectl -n openwhisk create cm invoker.config --from-env-file=invoker.env - kubectl apply -f invoker.yml + kubectl -n openwhisk create cm invoker.config --from-env-file=invoker-dcf.env + kubectl apply -f invoker-dcf.yml # wait until the invoker is ready deploymentHealthCheck "invoker" diff --git a/tools/travis/deploy.sh b/tools/travis/deploy.sh index 24ad12f9..efc8dc07 100755 --- 
a/tools/travis/deploy.sh +++ b/tools/travis/deploy.sh @@ -13,6 +13,9 @@ echo "Publishing kube-couchdb image" echo "Publishing kube-docker-pull image" ./tools/travis/publish.sh openwhisk kube-docker-pull latest docker/docker-pull +echo "Publishing kube-invoker-agent image" +./tools/travis/publish.sh openwhisk kube-invoker-agent latest docker/invoker-agent + echo "Publishing kube-openwhisk-catalog image" ./tools/travis/publish.sh openwhisk kube-openwhisk-catalog latest docker/openwhisk-catalog
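
For local experimentation, the new image can also be built directly from the Dockerfile
added in this patch (a sketch; the image name and tag simply mirror what
invoker-agent.yml and the publish step above expect):
```
docker build -t openwhisk/kube-invoker-agent:latest docker/invoker-agent
```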