diff --git a/.argo-ci/ci.yaml b/.argo-ci/ci.yaml
index 1ca24a44e0..8192403390 100644
--- a/.argo-ci/ci.yaml
+++ b/.argo-ci/ci.yaml
@@ -6,68 +6,68 @@ spec:
entrypoint: argo-events-ci
arguments:
parameters:
- - name: revision
- value: master
- - name: repo
- value: https://github.com/argoproj/argo-events.git
+ - name: revision
+ value: master
+ - name: repo
+ value: https://github.com/argoproj/argo-events.git
templates:
- - name: argo-events-ci
- steps:
- - - name: build
- template: ci-dind
- arguments:
- parameters:
+ - name: argo-events-ci
+ steps:
+ - - name: build
+ template: ci-dind
+ arguments:
+ parameters:
+ - name: cmd
+ value: "{{item}}"
+ withItems:
+ - dep ensure && make
+ - name: test
+ template: ci-builder
+ arguments:
+ parameters:
+ - name: cmd
+ value: "{{item}}"
+ withItems:
+ - dep ensure && make test
+ - name: ci-builder
+ inputs:
+ parameters:
- name: cmd
- value: "{{item}}"
- withItems:
- - dep ensure && make
- - name: test
- template: ci-builder
- arguments:
- parameters:
- - name: cmd
- value: "{{item}}"
- withItems:
- - dep ensure && make test
- - name: ci-builder
- inputs:
- parameters:
- - name: cmd
- artifacts:
- - name: code
- path: /go/src/github.com/argoproj/argo-events
- git:
- repo: "{{workflow.parameters.repo}}"
- revision: "{{workflow.parameters.revision}}"
- container:
- image: argoproj/argo-events-ci-builder:1.0
- command: [sh, -c]
- args: ["{{inputs.parameters.cmd}}"]
- workingDir: /go/src/github.com/argoproj/argo-events
+ artifacts:
+ - name: code
+ path: /go/src/github.com/argoproj/argo-events
+ git:
+ repo: "{{workflow.parameters.repo}}"
+ revision: "{{workflow.parameters.revision}}"
+ container:
+ image: argoproj/argo-events-ci-builder:1.0
+ command: [sh, -c]
+ args: ["{{inputs.parameters.cmd}}"]
+ workingDir: /go/src/github.com/argoproj/argo-events
- - name: ci-dind
- inputs:
- parameters:
- - name: cmd
- artifacts:
- - name: code
- path: /go/src/github.com/argoproj/argo-events
- git:
- repo: "{{workflow.parameters.repo}}"
- revision: "{{workflow.parameters.revision}}"
- container:
- image: argoproj/argo-events-ci-builder:1.0
- command: [sh, -c]
- args: ["until docker ps; do sleep 3; done && {{inputs.parameters.cmd}}"]
- workingDir: /go/src/github.com/argoproj/argo-events
- env:
- - name: DOCKER_HOST
- value: 127.0.0.1
- sidecars:
- - name: dind
- image: docker:17.10-dind
- securityContext:
- privileged: true
- mirrorVolumeMounts: true
+ - name: ci-dind
+ inputs:
+ parameters:
+ - name: cmd
+ artifacts:
+ - name: code
+ path: /go/src/github.com/argoproj/argo-events
+ git:
+ repo: "{{workflow.parameters.repo}}"
+ revision: "{{workflow.parameters.revision}}"
+ container:
+ image: argoproj/argo-events-ci-builder:1.0
+ command: [sh, -c]
+ args: ["until docker ps; do sleep 3; done && {{inputs.parameters.cmd}}"]
+ workingDir: /go/src/github.com/argoproj/argo-events
+ env:
+ - name: DOCKER_HOST
+ value: 127.0.0.1
+ sidecars:
+ - name: dind
+ image: docker:17.10-dind
+ securityContext:
+ privileged: true
+ mirrorVolumeMounts: true
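The re-indented manifest above is an ordinary Argo Workflow, so the CI run can also be kicked off by hand. A minimal sketch, assuming the `argo` CLI is installed and pointed at a cluster running the workflow controller (the parameter values shown are simply the defaults declared above):

```sh
# Submit the CI workflow manually, overriding the default parameters as needed.
argo submit .argo-ci/ci.yaml \
  -p revision=master \
  -p repo=https://github.com/argoproj/argo-events.git
```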
diff --git a/.travis.yml b/.travis.yml
index 9dc6ec5edc..f71788633f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -21,9 +21,9 @@ before_install:
- if [[ -d $HOME/docker ]]; then ls $HOME/docker/*.tar.gz | xargs -I {file} sh -c "zcat {file} | docker load"; fi
- go get github.com/mattn/goveralls
- if [ ! -d $HOME/bin/kubectl ]; then
- mkdir -p $HOME/bin;
- curl -o $HOME/bin/kubectl -L https://storage.googleapis.com/kubernetes-release/release/v1.13.4/bin/linux/amd64/kubectl;
- chmod +x $HOME/bin/kubectl;
+ mkdir -p $HOME/bin;
+ curl -o $HOME/bin/kubectl -L https://storage.googleapis.com/kubernetes-release/release/v1.13.4/bin/linux/amd64/kubectl;
+ chmod +x $HOME/bin/kubectl;
fi
before_cache:
diff --git a/Gopkg.lock b/Gopkg.lock
index 4935d93e52..faa890f74c 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -3,6 +3,7 @@
[[projects]]
branch = "master"
+ digest = "1:2ff9987255c3f00d7aa928f67977b23def1bf86ab60a7c862579bf2ee29d85bb"
name = "cloud.google.com/go"
packages = [
".",
@@ -12,51 +13,65 @@
"internal/version",
"pubsub",
"pubsub/apiv1",
- "pubsub/internal/distribution"
+ "pubsub/internal/distribution",
]
- revision = "90c61cb6b2d275ce6037ff82aa8cef13996bd861"
+ pruneopts = "UT"
+ revision = "08bca813144c81c4a70e9f73ce42c98599238e54"
[[projects]]
+ digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761"
name = "github.com/BurntSushi/toml"
packages = ["."]
+ pruneopts = "UT"
revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
version = "v0.3.1"
[[projects]]
branch = "master"
+ digest = "1:dc648facc1e7aac5086f749c84c9b9263345c08161fadd9cf92ae3309c9fcaa6"
name = "github.com/Knetic/govaluate"
packages = ["."]
+ pruneopts = "UT"
revision = "9aa49832a739dcd78a5542ff189fb82c3e423116"
[[projects]]
+ digest = "1:a2682518d905d662d984ef9959984ef87cecb777d379bfa9d9fe40e78069b3e4"
name = "github.com/PuerkitoBio/purell"
packages = ["."]
+ pruneopts = "UT"
revision = "44968752391892e1b0d0b821ee79e9a85fa13049"
version = "v1.1.1"
[[projects]]
branch = "master"
+ digest = "1:c739832d67eb1e9cc478a19cc1a1ccd78df0397bf8a32978b759152e205f644b"
name = "github.com/PuerkitoBio/urlesc"
packages = ["."]
+ pruneopts = "UT"
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
[[projects]]
+ digest = "1:05705adc86e7ba79c48d17cfbe1760e41d227f43e2880345dd7226e0ce67e4be"
name = "github.com/Shopify/sarama"
packages = ["."]
+ pruneopts = "UT"
revision = "675b0b1ff204c259877004140a540d6adf38db17"
version = "v1.24.1"
[[projects]]
+ digest = "1:252cca52c71315b507cfbad0b0d34c6a525c8c2427f0b09e0bf07baa70b39a51"
name = "github.com/argoproj/argo"
packages = [
"pkg/apis/workflow",
- "pkg/apis/workflow/v1alpha1"
+ "pkg/apis/workflow/v1alpha1",
]
+ pruneopts = "UT"
revision = "675c66267f0c916de0f233d8101aa0646acb46d4"
version = "v2.4.2"
[[projects]]
branch = "master"
+ digest = "1:3dc369d4d676d019a541e5bd27829e689cd6218343843cd84fe5477eb3899ca2"
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
@@ -92,78 +107,98 @@
"service/sns",
"service/sqs",
"service/sts",
- "service/sts/stsiface"
+ "service/sts/stsiface",
]
- revision = "ab596ec53119dade64e6f3cf60dab4b45e613890"
+ pruneopts = "UT"
+ revision = "36d1d7065765e55ac085734aa68be6de7c380b66"
[[projects]]
+ digest = "1:357f4baa5f50bb2a9d9d01600c8dadebf1cb890b59b53a4c810301fc7bf3736c"
name = "github.com/colinmarc/hdfs"
packages = [
".",
"protocol/hadoop_common",
"protocol/hadoop_hdfs",
- "rpc"
+ "rpc",
]
+ pruneopts = "UT"
revision = "48eb8d6c34a97ffc73b406356f0f2e1c569b42a5"
[[projects]]
+ digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
+ pruneopts = "UT"
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
branch = "master"
+ digest = "1:ecdc8e0fe3bc7d549af1c9c36acf3820523b707d6c071b6d0c3860882c6f7b42"
name = "github.com/docker/spdystream"
packages = [
".",
- "spdy"
+ "spdy",
]
+ pruneopts = "UT"
revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85"
[[projects]]
+ digest = "1:6f9339c912bbdda81302633ad7e99a28dfa5a639c864061f1929510a9a64aa74"
name = "github.com/dustin/go-humanize"
packages = ["."]
+ pruneopts = "UT"
revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e"
version = "v1.0.0"
[[projects]]
+ digest = "1:1f0c7ab489b407a7f8f9ad16c25a504d28ab461517a971d341388a56156c1bd7"
name = "github.com/eapache/go-resiliency"
packages = ["breaker"]
+ pruneopts = "UT"
revision = "5efd2ed019fd331ec2defc6f3bd98882f1e3e636"
version = "v1.2.0"
[[projects]]
branch = "master"
+ digest = "1:79f16588b5576b1b3cd90e48d2374cc9a1a8776862d28d8fd0f23b0e15534967"
name = "github.com/eapache/go-xerial-snappy"
packages = ["."]
+ pruneopts = "UT"
revision = "776d5712da21bc4762676d614db1d8a64f4238b0"
[[projects]]
+ digest = "1:444b82bfe35c83bbcaf84e310fb81a1f9ece03edfed586483c869e2c046aef69"
name = "github.com/eapache/queue"
packages = ["."]
+ pruneopts = "UT"
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
version = "v1.1.0"
[[projects]]
+ digest = "1:bb89a2542933056fcebc2950bb15ec636e623cc43c96597288aa2009f15b0ce1"
name = "github.com/eclipse/paho.mqtt.golang"
packages = [
".",
- "packets"
+ "packets",
]
+ pruneopts = "UT"
revision = "adca289fdcf8c883800aafa545bc263452290bae"
version = "v1.2.0"
[[projects]]
+ digest = "1:e15b0065da1011473634ffa0dda17731ea1d5f6fb8bb87905216d95fa29eaec0"
name = "github.com/emicklei/go-restful"
packages = [
".",
- "log"
+ "log",
]
+ pruneopts = "UT"
revision = "99f05a26a0a1c71e664ebe6a76d29b2c80333056"
version = "v2.11.1"
[[projects]]
+ digest = "1:b498b36dbb2b306d1c5205ee5236c9e60352be8f9eea9bf08186723a9f75b4f3"
name = "github.com/emirpasic/gods"
packages = [
"containers",
@@ -171,54 +206,70 @@
"lists/arraylist",
"trees",
"trees/binaryheap",
- "utils"
+ "utils",
]
+ pruneopts = "UT"
revision = "1615341f118ae12f353cc8a983f35b584342c9b3"
version = "v1.12.0"
[[projects]]
+ digest = "1:ac425d784b13d49b37a5bbed3ce022677f8f3073b216f05d6adcb9303e27fa0f"
name = "github.com/evanphx/json-patch"
packages = ["."]
+ pruneopts = "UT"
revision = "026c730a0dcc5d11f93f1cf1cc65b01247ea7b6f"
version = "v4.5.0"
[[projects]]
branch = "master"
+ digest = "1:925a2ad8acf10a486cdae4366eaf45847b16d6d7448e654814d8f1d51adeefe4"
name = "github.com/fsnotify/fsnotify"
packages = ["."]
+ pruneopts = "UT"
revision = "4bf2d1fec78374803a39307bfb8d340688f4f28e"
[[projects]]
branch = "master"
+ digest = "1:08188cf7ce7027b22e88cc23da27f17349a0ba7746271a60cbe0a70266c2346f"
name = "github.com/ghodss/yaml"
packages = ["."]
+ pruneopts = "UT"
revision = "25d852aebe32c875e9c044af3eef9c7dc6bc777f"
[[projects]]
+ digest = "1:ed15647db08b6d63666bf9755d337725960c302bbfa5e23754b4b915a4797e42"
name = "github.com/go-openapi/jsonpointer"
packages = ["."]
+ pruneopts = "UT"
revision = "ed123515f087412cd7ef02e49b0b0a5e6a79a360"
version = "v0.19.3"
[[projects]]
+ digest = "1:451fe53c19443c6941be5d4295edc973a3eb16baccb940efee94284024be03b0"
name = "github.com/go-openapi/jsonreference"
packages = ["."]
+ pruneopts = "UT"
revision = "82f31475a8f7a12bc26962f6e26ceade8ea6f66a"
version = "v0.19.3"
[[projects]]
+ digest = "1:55d1c09fc8b3320b6842565249a9e4d0f363bead6f9b8be05c3b47f2c4264eda"
name = "github.com/go-openapi/spec"
packages = ["."]
+ pruneopts = "UT"
revision = "8557d72e4f077c2dbe1e48df09e596b6fb9b7991"
version = "v0.19.4"
[[projects]]
+ digest = "1:43d0f99f53acce97119181dcd592321084690c2d462c57680ccb4472ae084949"
name = "github.com/go-openapi/swag"
packages = ["."]
+ pruneopts = "UT"
revision = "c3d0f7896d589f3babb99eea24bbc7de98108e72"
version = "v0.19.5"
[[projects]]
+ digest = "1:e5e45557e1871c967a6ccaa5b95d1233a2c01ab00615621825d1aca7383dc022"
name = "github.com/gobwas/glob"
packages = [
".",
@@ -228,11 +279,13 @@
"syntax/ast",
"syntax/lexer",
"util/runes",
- "util/strings"
+ "util/strings",
]
+ pruneopts = "UT"
revision = "e7a84e9525fe90abcda167b604e483cc959ad4aa"
[[projects]]
+ digest = "1:b51c4d0071cbb46efd912e9411ce2ec8755cb67bfa1e2a467b704c4a0469924d"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
@@ -262,19 +315,23 @@
"protoc-gen-gogofast",
"sortkeys",
"vanity",
- "vanity/command"
+ "vanity/command",
]
+ pruneopts = "UT"
revision = "5628607bb4c51c3157aacc3a50f0ab707582b805"
version = "v1.3.1"
[[projects]]
branch = "master"
+ digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
name = "github.com/golang/groupcache"
packages = ["lru"]
+ pruneopts = "UT"
revision = "611e8accdfc92c4187d399e95ce826046d4c8d73"
[[projects]]
branch = "master"
+ digest = "1:3c60e8d6869358bc8f4e9095cedd83abb34a47b81fe32a263389aba1c0e8f09f"
name = "github.com/golang/protobuf"
packages = [
"proto",
@@ -288,183 +345,241 @@
"ptypes/any",
"ptypes/duration",
"ptypes/empty",
- "ptypes/timestamp"
+ "ptypes/timestamp",
]
+ pruneopts = "UT"
revision = "ed6926b37a637426117ccab59282c3839528a700"
[[projects]]
+ digest = "1:e4f5819333ac698d294fe04dbf640f84719658d5c7ce195b10060cc37292ce79"
name = "github.com/golang/snappy"
packages = ["."]
+ pruneopts = "UT"
revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a"
version = "v0.0.1"
[[projects]]
+ digest = "1:1d1cbf539d9ac35eb3148129f96be5537f1a1330cadcc7e3a83b4e72a59672a3"
name = "github.com/google/go-cmp"
packages = [
"cmp",
"cmp/internal/diff",
"cmp/internal/flags",
"cmp/internal/function",
- "cmp/internal/value"
+ "cmp/internal/value",
]
+ pruneopts = "UT"
revision = "2d0692c2e9617365a95b295612ac0d4415ba4627"
version = "v0.3.1"
[[projects]]
+ digest = "1:a848ff8a9a04616f385520da14d031468ad24e4a9a38f84241d92bd045593251"
name = "github.com/google/go-github"
packages = ["github"]
+ pruneopts = "UT"
revision = "50be09d24ee31a2b0868265e76c24b9545a6eb7a"
[[projects]]
+ digest = "1:a63cff6b5d8b95638bfe300385d93b2a6d9d687734b863da8e09dc834510a690"
name = "github.com/google/go-querystring"
packages = ["query"]
+ pruneopts = "UT"
revision = "44c6ddd0a2342c386950e880b658017258da92fc"
version = "v1.0.0"
[[projects]]
+ digest = "1:a6181aca1fd5e27103f9a920876f29ac72854df7345a39f3b01e61c8c94cc8af"
name = "github.com/google/gofuzz"
packages = ["."]
+ pruneopts = "UT"
revision = "f140a6486e521aad38f5917de355cbf147cc0496"
version = "v1.0.0"
[[projects]]
+ digest = "1:582b704bebaa06b48c29b0cec224a6058a09c86883aaddabde889cd1a5f73e1b"
name = "github.com/google/uuid"
packages = ["."]
+ pruneopts = "UT"
revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
version = "v1.1.1"
[[projects]]
+ digest = "1:766102087520f9d54f2acc72bd6637045900ac735b4a419b128d216f0c5c4876"
name = "github.com/googleapis/gax-go"
packages = ["v2"]
+ pruneopts = "UT"
revision = "bd5b16380fd03dc758d11cef74ba2e3bc8b0e8c2"
version = "v2.0.5"
[[projects]]
+ digest = "1:ca4524b4855ded427c7003ec903a5c854f37e7b1e8e2a93277243462c5b753a8"
name = "github.com/googleapis/gnostic"
packages = [
"OpenAPIv2",
"compiler",
- "extensions"
+ "extensions",
]
+ pruneopts = "UT"
revision = "ab0dd09aa10e2952b28e12ecd35681b20463ebab"
version = "v0.3.1"
[[projects]]
branch = "master"
+ digest = "1:f14d1b50e0075fb00177f12a96dd7addf93d1e2883c25befd17285b779549795"
name = "github.com/gopherjs/gopherjs"
packages = ["js"]
- revision = "ce3c9ade29deed38a85f259f40e823cc17213830"
+ pruneopts = "UT"
+ revision = "d3ddacdb130fcd23f77a827e3b599804730be6b5"
[[projects]]
+ digest = "1:cbec35fe4d5a4fba369a656a8cd65e244ea2c743007d8f6c1ccb132acf9d1296"
+ name = "github.com/gorilla/mux"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "00bdffe0f3c77e27d2cf6f5c70232a2d3e4d9c15"
+ version = "v1.7.3"
+
+[[projects]]
+ digest = "1:e62657cca9badaa308d86e7716083e4c5933bb78e30a17743fc67f50be26f6f4"
name = "github.com/gorilla/websocket"
packages = ["."]
+ pruneopts = "UT"
revision = "c3e18be99d19e6b3e8f1559eea2c161a665c4b6b"
version = "v1.4.1"
[[projects]]
+ digest = "1:f14364057165381ea296e49f8870a9ffce2b8a95e34d6ae06c759106aaef428c"
name = "github.com/hashicorp/go-uuid"
packages = ["."]
+ pruneopts = "UT"
revision = "4f571afc59f3043a65f8fe6bf46d887b10a01d43"
version = "v1.0.1"
[[projects]]
+ digest = "1:c77361e611524ec8f2ad37c408c3c916111a70b6acf806a1200855696bf8fa4d"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
- "simplelru"
+ "simplelru",
]
+ pruneopts = "UT"
revision = "7f827b33c0f158ec5dfbba01bb0b14a4541fd81d"
version = "v0.5.3"
[[projects]]
+ digest = "1:78d28d5b84a26159c67ea51996a230da4bc07cac648adaae1dfb5fc0ec8e40d3"
name = "github.com/imdario/mergo"
packages = ["."]
+ pruneopts = "UT"
revision = "1afb36080aec31e0d1528973ebe6721b191b0369"
version = "v0.3.8"
[[projects]]
branch = "master"
+ digest = "1:62fe3a7ea2050ecbd753a71889026f83d73329337ada66325cbafd5dea5f713d"
name = "github.com/jbenet/go-context"
packages = ["io"]
+ pruneopts = "UT"
revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4"
[[projects]]
+ digest = "1:ae221758bdddd57f5c76f4ee5e4110af32ee62583c46299094697f8f127e63da"
name = "github.com/jcmturner/gofork"
packages = [
"encoding/asn1",
- "x/crypto/pbkdf2"
+ "x/crypto/pbkdf2",
]
+ pruneopts = "UT"
revision = "dc7c13fece037a4a36e2b3c69db4991498d30692"
version = "v1.0.0"
[[projects]]
+ digest = "1:bb81097a5b62634f3e9fec1014657855610c82d19b9a40c17612e32651e35dca"
name = "github.com/jmespath/go-jmespath"
packages = ["."]
+ pruneopts = "UT"
revision = "c2b33e84"
[[projects]]
branch = "master"
+ digest = "1:3daa28dd53624e04229a3499b6bb547b4c467d488e8293b1fc9d67a922713896"
name = "github.com/joncalhoun/qson"
packages = ["."]
+ pruneopts = "UT"
revision = "8a9cab3a62b1b693e7dfa590a215dc6217552803"
[[projects]]
+ digest = "1:beb5b4f42a25056f0aa291b5eadd21e2f2903a05d15dfe7caf7eaee7e12fa972"
name = "github.com/json-iterator/go"
packages = ["."]
+ pruneopts = "UT"
revision = "03217c3e97663914aec3faafde50d081f197a0a2"
version = "v1.1.8"
[[projects]]
+ digest = "1:076c531484852c227471112d49465873aaad47e5ad6e1aec3a5b092a436117ef"
name = "github.com/jstemmer/go-junit-report"
packages = [
".",
"formatter",
- "parser"
+ "parser",
]
+ pruneopts = "UT"
revision = "cc1f095d5cc5eca2844f5c5ea7bb37f6b9bf6cac"
version = "v0.9.1"
[[projects]]
+ digest = "1:4b63210654b1f2b664f74ec434a1bb1cb442b3d75742cc064a10808d1cca6361"
name = "github.com/jtolds/gls"
packages = ["."]
+ pruneopts = "UT"
revision = "b4936e06046bbecbb94cae9c18127ebe510a2cb9"
version = "v4.20"
[[projects]]
+ digest = "1:fd7f169f32c221b096c74e756bda16fe22d3bb448bbf74042fd0700407a1f92f"
name = "github.com/kevinburke/ssh_config"
packages = ["."]
+ pruneopts = "UT"
revision = "6cfae18c12b8934b1afba3ce8159476fdef666ba"
version = "1.0"
[[projects]]
+ digest = "1:69ababe7369aa29063b83d163bdc1b939c1480a6c0d2b44e042d016f35c6e4ad"
name = "github.com/klauspost/compress"
packages = [
"fse",
"huff0",
"snappy",
"zstd",
- "zstd/internal/xxhash"
+ "zstd/internal/xxhash",
]
+ pruneopts = "UT"
revision = "16a4d3d7137cdefd94d420f22b5c20260674b95c"
version = "v1.9.1"
[[projects]]
+ digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de"
name = "github.com/konsorten/go-windows-terminal-sequences"
packages = ["."]
+ pruneopts = "UT"
revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
version = "v1.0.2"
[[projects]]
+ digest = "1:927762c6729b4e72957ba3310e485ed09cf8451c5a637a52fd016a9fe09e7936"
name = "github.com/mailru/easyjson"
packages = [
"buffer",
"jlexer",
- "jwriter"
+ "jwriter",
]
+ pruneopts = "UT"
revision = "1b2b06f5f209fea48ff5922d8bfb2b9ed5d8f00b"
version = "v0.7.0"
[[projects]]
+ digest = "1:88e7456f46448df99fd934c3b90621afbaa14d29172923e36c47f42de386be56"
name = "github.com/minio/minio-go"
packages = [
".",
@@ -472,219 +587,279 @@
"pkg/encrypt",
"pkg/s3signer",
"pkg/s3utils",
- "pkg/set"
+ "pkg/set",
]
+ pruneopts = "UT"
revision = "c6c2912aa5522e5f5a505e6cba30e95f0d8456fa"
version = "v6.0.25"
[[projects]]
+ digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
+ pruneopts = "UT"
revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
version = "v1.1.0"
[[projects]]
+ digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
+ pruneopts = "UT"
revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
version = "v1.1.2"
[[projects]]
+ digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563"
name = "github.com/modern-go/concurrent"
packages = ["."]
+ pruneopts = "UT"
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
+ digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855"
name = "github.com/modern-go/reflect2"
packages = ["."]
+ pruneopts = "UT"
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
version = "1.0.1"
[[projects]]
+ digest = "1:2ca73053216eb11c8eea2855c8099ad82773638522f91cc0542ec9759163ff3c"
name = "github.com/nats-io/go-nats"
packages = [
".",
"encoders/builtin",
- "util"
+ "util",
]
+ pruneopts = "UT"
revision = "70fe06cee50d4b6f98248d9675fb55f2a3aa7228"
version = "v1.7.2"
[[projects]]
branch = "master"
+ digest = "1:7594f474ecd4b8b2e58f572fffb6db29cbfe9b000260fefff5371865a5a43a83"
name = "github.com/nats-io/go-nats-streaming"
packages = [
".",
- "pb"
+ "pb",
]
+ pruneopts = "UT"
revision = "c4c6d40b2ba9166e155cda77274d615aed57f314"
[[projects]]
+ digest = "1:7cd07cefacf6d1e85399a47a68b32f4b8bef8ca12705bc46efb7d45c0dccb4af"
name = "github.com/nats-io/nkeys"
packages = ["."]
+ pruneopts = "UT"
revision = "abe9e4e0a640435d624e757a9110b0e59f0b6b2c"
version = "v0.1.2"
[[projects]]
+ digest = "1:599f3202ce0a754144ddc4be4c6df9c6ab27b1d722a63ede6b2e0c3a2cc338a8"
name = "github.com/nats-io/nuid"
packages = ["."]
+ pruneopts = "UT"
revision = "4b96681fa6d28dd0ab5fe79bac63b3a493d9ee94"
version = "v1.0.1"
[[projects]]
branch = "master"
+ digest = "1:9135761761efd675ec1bdc76b3449b1ce6865d1852a2fbf25bde19e255acac31"
name = "github.com/nlopes/slack"
packages = [
".",
"internal/errorsx",
"internal/timex",
"slackevents",
- "slackutilsx"
+ "slackutilsx",
]
- revision = "d06c2a2b3249b44a9c5dee8485f5a87497beb9ea"
+ pruneopts = "UT"
+ revision = "d20eeb27bf8f755e0c9aafa933eced3b574b2219"
[[projects]]
+ digest = "1:cef870622e603ac1305922eb5d380455cad27e354355ae7a855d8633ffa66197"
name = "github.com/pierrec/lz4"
packages = [
".",
- "internal/xxh32"
+ "internal/xxh32",
]
+ pruneopts = "UT"
revision = "645f9b948eee34cbcc335c70999f79c29c420fbf"
version = "v2.3.0"
[[projects]]
+ digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
name = "github.com/pkg/errors"
packages = ["."]
+ pruneopts = "UT"
revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
version = "v0.8.1"
[[projects]]
+ digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
+ pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
branch = "master"
+ digest = "1:5bbebe8ac19ecb6c87790a89faa20566e38ed0d6494a1d14c4f5b05d9ce2436c"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
+ pruneopts = "UT"
revision = "cac0b30c2563378d434b5af411844adff8e32960"
[[projects]]
+ digest = "1:ed615c5430ecabbb0fb7629a182da65ecee6523900ac1ac932520860878ffcad"
name = "github.com/robfig/cron"
packages = ["."]
+ pruneopts = "UT"
revision = "b41be1df696709bb6395fe435af20370037c0b4c"
version = "v1.2.0"
[[projects]]
+ digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04"
name = "github.com/sergi/go-diff"
packages = ["diffmatchpatch"]
+ pruneopts = "UT"
revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
version = "v1.0.0"
[[projects]]
branch = "master"
+ digest = "1:e21aee53138ca0b77d49f81f15c349b36863beb2f58d7f788136b1c75364e509"
name = "github.com/sirupsen/logrus"
packages = ["."]
+ pruneopts = "UT"
revision = "67a7fdcf741f4d5cee82cb9800994ccfd4393ad0"
[[projects]]
+ digest = "1:237af0cf68bac89e21af72e6cd6b64f388854895e75f82ad08c6c011e1a8286c"
name = "github.com/smartystreets/assertions"
packages = [
".",
"internal/go-diff/diffmatchpatch",
"internal/go-render/render",
- "internal/oglematchers"
+ "internal/oglematchers",
]
+ pruneopts = "UT"
revision = "f487f9de1cd36ebab28235b9373028812fb47cbd"
version = "1.10.1"
[[projects]]
+ digest = "1:483aa658b8b58e357a07ebdfcbcb0202b46f0c0f91e9b63c8807f7d4f5cd30f9"
name = "github.com/smartystreets/goconvey"
packages = [
"convey",
"convey/gotest",
- "convey/reporting"
+ "convey/reporting",
]
+ pruneopts = "UT"
revision = "505e419363375c0dc132d3ac02632a4ee32199ca"
version = "v1.6.4"
[[projects]]
+ digest = "1:524b71991fc7d9246cc7dc2d9e0886ccb97648091c63e30eef619e6862c955dd"
name = "github.com/spf13/pflag"
packages = ["."]
+ pruneopts = "UT"
revision = "2e9d26c8c37aae03e3f9d4e90b7116f5accb7cab"
version = "v1.0.5"
[[projects]]
+ digest = "1:e4ed0afd67bf7be353921665cdac50834c867ff1bba153efc0745b755a7f5905"
name = "github.com/src-d/gcfg"
packages = [
".",
"scanner",
"token",
- "types"
+ "types",
]
+ pruneopts = "UT"
revision = "1ac3a1ac202429a54835fe8408a92880156b489d"
version = "v1.4.0"
[[projects]]
branch = "master"
+ digest = "1:3df46e572883257c46c470dc1796f9bc609d0d0d7339dd10358030649b9beb93"
name = "github.com/streadway/amqp"
packages = ["."]
+ pruneopts = "UT"
revision = "edfb9018d2714e4ec54dbaba37dbfef2bdadf0e4"
[[projects]]
+ digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02"
name = "github.com/stretchr/objx"
packages = ["."]
+ pruneopts = "UT"
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
version = "v0.1.1"
[[projects]]
+ digest = "1:ad527ce5c6b2426790449db7663fe53f8bb647f9387295406794c8be001238da"
name = "github.com/stretchr/testify"
packages = [
"assert",
- "mock"
+ "mock",
]
+ pruneopts = "UT"
revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
version = "v1.4.0"
[[projects]]
+ digest = "1:5a68167017eaa32aa408397806b9d69815244238ed774439a8863ef4bc329eeb"
name = "github.com/tidwall/gjson"
packages = ["."]
+ pruneopts = "UT"
revision = "c34bf81952c067718854115564f8e55978be5e1d"
version = "v1.3.4"
[[projects]]
+ digest = "1:8453ddbed197809ee8ca28b06bd04e127bec9912deb4ba451fea7a1eca578328"
name = "github.com/tidwall/match"
packages = ["."]
+ pruneopts = "UT"
revision = "33827db735fff6510490d69a8622612558a557ed"
version = "v1.0.1"
[[projects]]
+ digest = "1:ddfe0a54e5f9b29536a6d7b2defa376f2cb2b6e4234d676d7ff214d5b097cb50"
name = "github.com/tidwall/pretty"
packages = ["."]
+ pruneopts = "UT"
revision = "1166b9ac2b65e46a43d8618d30d1554f4652d49b"
version = "v1.0.0"
[[projects]]
+ digest = "1:b70c951ba6fdeecfbd50dabe95aa5e1b973866ae9abbece46ad60348112214f2"
name = "github.com/tidwall/sjson"
packages = ["."]
+ pruneopts = "UT"
revision = "25fb082a20e29e83fb7b7ef5f5919166aad1f084"
version = "v1.0.4"
[[projects]]
+ digest = "1:766db8705204fd893db77ff5fde228362fbceac616b87ccb9976518095aac8ce"
name = "github.com/xanzy/go-gitlab"
packages = ["."]
- revision = "457d4d018eaa1fad8e6c63502cebcd11ba60164e"
- version = "v0.22.0"
+ pruneopts = "UT"
+ revision = "87a6b9db49fa4bd6efeaeec450b0c5661f94fcb5"
+ version = "v0.21.0"
[[projects]]
+ digest = "1:172f94a6b3644a8f9e6b5e5b7fc9fe1e42d424f52a0300b2e7ab1e57db73f85d"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
+ pruneopts = "UT"
revision = "6a3e2ff9e7c564f36873c2e36413f634534f1c44"
version = "v0.2.1"
[[projects]]
+ digest = "1:07ca513e3b295b9aeb1f0f6fbefb8102139a2764ce0f28d02040c0eb2dc276dd"
name = "go.opencensus.io"
packages = [
".",
@@ -703,13 +878,15 @@
"trace",
"trace/internal",
"trace/propagation",
- "trace/tracestate"
+ "trace/tracestate",
]
+ pruneopts = "UT"
revision = "59d1ce35d30f3c25ba762169da2a37eab6ffa041"
version = "v0.22.1"
[[projects]]
branch = "master"
+ digest = "1:35041d2389057ddabc32b374768b133c795fa7f6eb9bcd80ac9bd820d86ea8a4"
name = "golang.org/x/crypto"
packages = [
"argon2",
@@ -732,30 +909,36 @@
"ssh",
"ssh/agent",
"ssh/knownhosts",
- "ssh/terminal"
+ "ssh/terminal",
]
- revision = "f4817d981bb690635456c5c1c6aa0585e5d45891"
+ pruneopts = "UT"
+ revision = "c7e5f84aec591254278750bee18f39e5dd19cdb5"
[[projects]]
branch = "master"
+ digest = "1:b1444bc98b5838c3116ed23e231fee4fa8509f975abd96e5d9e67e572dd01604"
name = "golang.org/x/exp"
packages = [
"apidiff",
- "cmd/apidiff"
+ "cmd/apidiff",
]
+ pruneopts = "UT"
revision = "a1ab85dbe136a36c66fbea07de5e3d62a0ce60ad"
[[projects]]
branch = "master"
+ digest = "1:21d7bad9b7da270fd2d50aba8971a041bd691165c95096a2a4c68db823cbc86a"
name = "golang.org/x/lint"
packages = [
".",
- "golint"
+ "golint",
]
+ pruneopts = "UT"
revision = "16217165b5de779cb6a5e4fc81fa9c1166fda457"
[[projects]]
branch = "master"
+ digest = "1:c01950158c9535178651a56e3a82b2088560555e9f69b1c9a0f79afbc8619c43"
name = "golang.org/x/net"
packages = [
"context",
@@ -769,42 +952,50 @@
"proxy",
"publicsuffix",
"trace",
- "websocket"
+ "websocket",
]
- revision = "7e6e90b9ea8824b29cbeee76d03ef838c9187418"
+ pruneopts = "UT"
+ revision = "a882066a44e04a21d46e51451c71e131344e830e"
[[projects]]
branch = "master"
+ digest = "1:31e33f76456ccf54819ab4a646cf01271d1a99d7712ab84bf1a9e7b61cd2031b"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
- "jwt"
+ "jwt",
]
+ pruneopts = "UT"
revision = "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33"
[[projects]]
branch = "master"
+ digest = "1:a2fc247e64b5dafd3251f12d396ec85f163d5bb38763c4997856addddf6e78d8"
name = "golang.org/x/sync"
packages = [
"errgroup",
- "semaphore"
+ "semaphore",
]
+ pruneopts = "UT"
revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb"
[[projects]]
branch = "master"
+ digest = "1:92c957c2713e817f2c5e25735152837312e3e0458e784a5e168d725474ac8995"
name = "golang.org/x/sys"
packages = [
"cpu",
"unix",
- "windows"
+ "windows",
]
+ pruneopts = "UT"
revision = "c1f44814a5cd81a6d1cb589ef1e528bc5d305e07"
[[projects]]
+ digest = "1:66a2f252a58b4fbbad0e4e180e1d85a83c222b6bce09c3dcdef3dc87c72eda7c"
name = "golang.org/x/text"
packages = [
"collate",
@@ -823,19 +1014,23 @@
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
- "width"
+ "width",
]
+ pruneopts = "UT"
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
branch = "master"
+ digest = "1:a2f668c709f9078828e99cb1768cb02e876cb81030545046a32b54b2ac2a9ea8"
name = "golang.org/x/time"
packages = ["rate"]
+ pruneopts = "UT"
revision = "555d28b269f0569763d25dbe1a237ae74c6bcc82"
[[projects]]
branch = "master"
+ digest = "1:403cd4290937cc1ce67cd607806ee217720305519109b6c49d88a44144c1e100"
name = "golang.org/x/tools"
packages = [
"cmd/goimports",
@@ -856,11 +1051,13 @@
"internal/imports",
"internal/module",
"internal/semver",
- "internal/span"
+ "internal/span",
]
- revision = "f7ea15e60b12ba031eaab7e3247e845e5c7eba73"
+ pruneopts = "UT"
+ revision = "689d0f08e67ae0c77c260e137ac6a3729498c92f"
[[projects]]
+ digest = "1:526726f1b56fe59206fd9ade203359f20d0958c2feaa6c263f677cf655944c92"
name = "google.golang.org/api"
packages = [
"googleapi/transport",
@@ -871,12 +1068,14 @@
"transport",
"transport/grpc",
"transport/http",
- "transport/http/internal/propagation"
+ "transport/http/internal/propagation",
]
+ pruneopts = "UT"
revision = "4f42dad4690a01d7f6fa461106c63889ff1be027"
version = "v0.13.0"
[[projects]]
+ digest = "1:c98e9b93e6d178378530b920fe6e1aa4b3dd4972872111e83827746aa1f33ded"
name = "google.golang.org/appengine"
packages = [
".",
@@ -890,13 +1089,15 @@
"internal/socket",
"internal/urlfetch",
"socket",
- "urlfetch"
+ "urlfetch",
]
+ pruneopts = "UT"
revision = "971852bfffca25b069c31162ae8f247a3dba083b"
version = "v1.6.5"
[[projects]]
branch = "master"
+ digest = "1:fb32dd440d7b296fd516c15af931e13cab6b0ce746f1b077c5d79825bbde0037"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/annotations",
@@ -904,11 +1105,13 @@
"googleapis/pubsub/v1",
"googleapis/rpc/status",
"googleapis/type/expr",
- "protobuf/field_mask"
+ "protobuf/field_mask",
]
+ pruneopts = "UT"
revision = "919d9bdd9fe6f1a5dd95ce5d5e4cdb8fd3c516d0"
[[projects]]
+ digest = "1:56380851a4d733663a711bd5ad5917ea5cb825661364aa13397c6af2061be8bc"
name = "google.golang.org/grpc"
packages = [
".",
@@ -956,36 +1159,46 @@
"serviceconfig",
"stats",
"status",
- "tap"
+ "tap",
]
+ pruneopts = "UT"
revision = "9d331e2b02dd47daeecae02790f61cc88dc75a64"
version = "v1.25.0"
[[projects]]
+ digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
name = "gopkg.in/inf.v0"
packages = ["."]
+ pruneopts = "UT"
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1"
[[projects]]
+ digest = "1:2a2e303bb32696f9250d4ba3d8c25c7849a6bff66a13a9f9caf4f34774b5ee51"
name = "gopkg.in/ini.v1"
packages = ["."]
- revision = "8ee0b789944d2b0a7297c671009b3e7bc977376f"
- version = "v1.50.0"
+ pruneopts = "UT"
+ revision = "1eb383f13cde0e7be091181a93b58574638129f0"
+ version = "v1.49.0"
[[projects]]
+ digest = "1:c902038ee2d6f964d3b9f2c718126571410c5d81251cbab9fe58abd37803513c"
name = "gopkg.in/jcmturner/aescts.v1"
packages = ["."]
+ pruneopts = "UT"
revision = "f6abebb3171c4c1b1fea279cb7c7325020a26290"
version = "v1.0.1"
[[projects]]
+ digest = "1:a1a3e185c03d79a7452d5d5b4c91be4cc433f55e6ed3a35233d852c966e39013"
name = "gopkg.in/jcmturner/dnsutils.v1"
packages = ["."]
+ pruneopts = "UT"
revision = "13eeb8d49ffb74d7a75784c35e4d900607a3943c"
version = "v1.0.1"
[[projects]]
+ digest = "1:653c1ef9be253f28c38612cc0fb0571dd440a3d61a97f82e6205d53942a7b4a9"
name = "gopkg.in/jcmturner/gokrb5.v5"
packages = [
"asn1tools",
@@ -1018,12 +1231,14 @@
"messages",
"mstypes",
"pac",
- "types"
+ "types",
]
+ pruneopts = "UT"
revision = "32ba44ca5b42f17a4a9f33ff4305e70665a1bc0f"
version = "v5.3.0"
[[projects]]
+ digest = "1:dc01a587d07be012625ba63df6d4224ae6d7a83e79bfebde6d987c10538d66dd"
name = "gopkg.in/jcmturner/gokrb5.v7"
packages = [
"asn1tools",
@@ -1055,39 +1270,47 @@
"krberror",
"messages",
"pac",
- "types"
+ "types",
]
+ pruneopts = "UT"
revision = "363118e62befa8a14ff01031c025026077fe5d6d"
version = "v7.3.0"
[[projects]]
+ digest = "1:917e312d1c83bac01db5771433a141f7e4754df0ebe83d2e8edc821320aff849"
name = "gopkg.in/jcmturner/rpc.v0"
packages = ["ndr"]
+ pruneopts = "UT"
revision = "4480c480c9cd343b54b0acb5b62261cbd33d7adf"
version = "v0.0.2"
[[projects]]
+ digest = "1:0f16d9c577198e3b8d3209f5a89aabe679525b2aba2a7548714e973035c0e232"
name = "gopkg.in/jcmturner/rpc.v1"
packages = [
"mstypes",
- "ndr"
+ "ndr",
]
+ pruneopts = "UT"
revision = "99a8ce2fbf8b8087b6ed12a37c61b10f04070043"
version = "v1.1.0"
[[projects]]
+ digest = "1:eb27cfcaf8d7e4155224dd0a209f1d0ab19784fef01be142638b78b7b6becd6b"
name = "gopkg.in/src-d/go-billy.v4"
packages = [
".",
"helper/chroot",
"helper/polyfill",
"osfs",
- "util"
+ "util",
]
+ pruneopts = "UT"
revision = "780403cfc1bc95ff4d07e7b26db40a6186c5326e"
version = "v4.3.2"
[[projects]]
+ digest = "1:b2ad0a18676cd4d5b4b180709c1ea34dbabd74b3d7db0cc01e6d287d5f1e3a99"
name = "gopkg.in/src-d/go-git.v4"
packages = [
".",
@@ -1130,24 +1353,30 @@
"utils/merkletrie/filesystem",
"utils/merkletrie/index",
"utils/merkletrie/internal/frame",
- "utils/merkletrie/noder"
+ "utils/merkletrie/noder",
]
+ pruneopts = "UT"
revision = "0d1a009cbb604db18be960db5f1525b99a55d727"
version = "v4.13.1"
[[projects]]
+ digest = "1:78d374b493e747afa9fbb2119687e3740a7fb8d0ebabddfef0a012593aaecbb3"
name = "gopkg.in/warnings.v0"
packages = ["."]
+ pruneopts = "UT"
revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
version = "v0.1.2"
[[projects]]
+ digest = "1:f26a5d382387e03a40d1471dddfba85dfff9bf05352d7e42d37612677c4d3c5c"
name = "gopkg.in/yaml.v2"
packages = ["."]
+ pruneopts = "UT"
revision = "f90ceb4f409096b60e2e9076b38b304b8246e5fa"
version = "v2.2.5"
[[projects]]
+ digest = "1:131158a88aad1f94854d0aa21a64af2802d0a470fb0f01cb33c04fafd2047111"
name = "honnef.co/go/tools"
packages = [
"arg",
@@ -1174,13 +1403,15 @@
"staticcheck/vrp",
"stylecheck",
"unused",
- "version"
+ "version",
]
+ pruneopts = "UT"
revision = "afd67930eec2a9ed3e9b19f684d17a062285f16a"
version = "2019.2.3"
[[projects]]
branch = "release-1.15"
+ digest = "1:d89afbf3588e87d2c9e6efdd5528d249b32d23a12fbd7ec324f3cb373c6fb76c"
name = "k8s.io/api"
packages = [
"admissionregistration/v1beta1",
@@ -1218,12 +1449,14 @@
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
- "storage/v1beta1"
+ "storage/v1beta1",
]
+ pruneopts = "UT"
revision = "3a12735a829ac2f7817379647da6d47c39327512"
[[projects]]
branch = "release-1.15"
+ digest = "1:b2ba899970541943ec0a2f1bdef707b029d8591236fb7dab27b222f84037d8ef"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/errors",
@@ -1270,12 +1503,13 @@
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/netutil",
- "third_party/forked/golang/reflect"
+ "third_party/forked/golang/reflect",
]
+ pruneopts = "UT"
revision = "31ade1b30762be61c32b2e8db2a11aa8b0b8960e"
[[projects]]
- branch = "release-12.0"
+ digest = "1:25e5278fc841f46e9b465a8c67f3304b32ac85ced3b7057438318db79f71c1ed"
name = "k8s.io/client-go"
packages = [
"discovery",
@@ -1468,12 +1702,15 @@
"util/homedir",
"util/keyutil",
"util/retry",
- "util/workqueue"
+ "util/workqueue",
]
- revision = "5f2132fc4383659da452dba21a1c6c9890b0e062"
+ pruneopts = "UT"
+ revision = "78d2af792babf2dd937ba2e2a8d99c753a5eda89"
+ version = "v12.0.0"
[[projects]]
branch = "release-1.15"
+ digest = "1:f41480fd8c5f54b13326ee0f2ee398d5734789b790dbc4db57f9b08a0d63139a"
name = "k8s.io/code-generator"
packages = [
"cmd/client-gen",
@@ -1485,11 +1722,13 @@
"cmd/client-gen/path",
"cmd/client-gen/types",
"pkg/namer",
- "pkg/util"
+ "pkg/util",
]
+ pruneopts = "T"
revision = "18da4a14b22b17d2fa761e50037fabfbacec225b"
[[projects]]
+ digest = "1:a9f99f1c11620be972d49d2e4e296031a5fbc168ada49a9e618d9b35f751f119"
name = "k8s.io/gengo"
packages = [
"args",
@@ -1499,18 +1738,22 @@
"generator",
"namer",
"parser",
- "types"
+ "types",
]
+ pruneopts = "T"
revision = "b90029ef6cd877cb3f422d75b3a07707e3aac6b7"
[[projects]]
+ digest = "1:93e82f25d75aba18436ad1ac042cb49493f096011f2541075721ed6f9e05c044"
name = "k8s.io/klog"
packages = ["."]
+ pruneopts = "UT"
revision = "2ca9ad30301bf30a8a6e0fa2110db6b8df699a91"
version = "v1.0.0"
[[projects]]
branch = "master"
+ digest = "1:94ad85c6f3cfad58b9f602b9a2d4af530e8db8f861bca708c4dd236f771ea1a4"
name = "k8s.io/kube-openapi"
packages = [
"cmd/openapi-gen",
@@ -1519,29 +1762,128 @@
"pkg/generators",
"pkg/generators/rules",
"pkg/util/proto",
- "pkg/util/sets"
+ "pkg/util/sets",
]
- revision = "30be4d16710ac61bce31eb28a01054596fe6a9f1"
+ pruneopts = "UT"
+ revision = "0270cf2f1c1d995d34b36019a6f65d58e6e33ad4"
[[projects]]
branch = "master"
+ digest = "1:2d3f59daa4b479ff4e100a2e1d8fea6780040fdadc177869531fe4cc29407f55"
name = "k8s.io/utils"
packages = [
"buffer",
"integer",
- "trace"
+ "trace",
]
+ pruneopts = "UT"
revision = "2b95a09bc58df43d4032504619706b6a38293a47"
[[projects]]
+ digest = "1:7719608fe0b52a4ece56c2dde37bedd95b938677d1ab0f84b8a7852e4c59f849"
name = "sigs.k8s.io/yaml"
packages = ["."]
+ pruneopts = "UT"
revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
version = "v1.1.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "df67feb7bfc29e71c524d42fa1f6878e4b4ffbe085ece6cf09c30a8875269e0f"
+ input-imports = [
+ "cloud.google.com/go/pubsub",
+ "github.com/Knetic/govaluate",
+ "github.com/Shopify/sarama",
+ "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1",
+ "github.com/aws/aws-sdk-go/aws",
+ "github.com/aws/aws-sdk-go/aws/credentials",
+ "github.com/aws/aws-sdk-go/aws/session",
+ "github.com/aws/aws-sdk-go/service/sns",
+ "github.com/aws/aws-sdk-go/service/sqs",
+ "github.com/colinmarc/hdfs",
+ "github.com/eclipse/paho.mqtt.golang",
+ "github.com/fsnotify/fsnotify",
+ "github.com/ghodss/yaml",
+ "github.com/go-openapi/spec",
+ "github.com/gobwas/glob",
+ "github.com/gogo/protobuf/protoc-gen-gofast",
+ "github.com/gogo/protobuf/protoc-gen-gogofast",
+ "github.com/golang/protobuf/proto",
+ "github.com/golang/protobuf/protoc-gen-go",
+ "github.com/google/go-github/github",
+ "github.com/google/uuid",
+ "github.com/gorilla/mux",
+ "github.com/joncalhoun/qson",
+ "github.com/minio/minio-go",
+ "github.com/mitchellh/mapstructure",
+ "github.com/nats-io/go-nats",
+ "github.com/nats-io/go-nats-streaming",
+ "github.com/nlopes/slack",
+ "github.com/nlopes/slack/slackevents",
+ "github.com/pkg/errors",
+ "github.com/robfig/cron",
+ "github.com/sirupsen/logrus",
+ "github.com/smartystreets/goconvey/convey",
+ "github.com/streadway/amqp",
+ "github.com/stretchr/testify/assert",
+ "github.com/stretchr/testify/mock",
+ "github.com/tidwall/gjson",
+ "github.com/tidwall/sjson",
+ "github.com/xanzy/go-gitlab",
+ "golang.org/x/crypto/ssh",
+ "google.golang.org/api/option",
+ "google.golang.org/grpc",
+ "google.golang.org/grpc/codes",
+ "google.golang.org/grpc/connectivity",
+ "google.golang.org/grpc/metadata",
+ "google.golang.org/grpc/status",
+ "gopkg.in/jcmturner/gokrb5.v5/client",
+ "gopkg.in/jcmturner/gokrb5.v5/config",
+ "gopkg.in/jcmturner/gokrb5.v5/credentials",
+ "gopkg.in/jcmturner/gokrb5.v5/keytab",
+ "gopkg.in/src-d/go-git.v4",
+ "gopkg.in/src-d/go-git.v4/config",
+ "gopkg.in/src-d/go-git.v4/plumbing",
+ "gopkg.in/src-d/go-git.v4/plumbing/transport",
+ "gopkg.in/src-d/go-git.v4/plumbing/transport/http",
+ "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh",
+ "k8s.io/api/apps/v1",
+ "k8s.io/api/core/v1",
+ "k8s.io/apimachinery/pkg/api/errors",
+ "k8s.io/apimachinery/pkg/apis/meta/v1",
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
+ "k8s.io/apimachinery/pkg/fields",
+ "k8s.io/apimachinery/pkg/labels",
+ "k8s.io/apimachinery/pkg/runtime",
+ "k8s.io/apimachinery/pkg/runtime/schema",
+ "k8s.io/apimachinery/pkg/runtime/serializer",
+ "k8s.io/apimachinery/pkg/selection",
+ "k8s.io/apimachinery/pkg/types",
+ "k8s.io/apimachinery/pkg/util/intstr",
+ "k8s.io/apimachinery/pkg/util/runtime",
+ "k8s.io/apimachinery/pkg/util/wait",
+ "k8s.io/apimachinery/pkg/watch",
+ "k8s.io/client-go/discovery",
+ "k8s.io/client-go/discovery/fake",
+ "k8s.io/client-go/dynamic",
+ "k8s.io/client-go/dynamic/dynamicinformer",
+ "k8s.io/client-go/dynamic/fake",
+ "k8s.io/client-go/informers/core/v1",
+ "k8s.io/client-go/kubernetes",
+ "k8s.io/client-go/kubernetes/fake",
+ "k8s.io/client-go/kubernetes/scheme",
+ "k8s.io/client-go/rest",
+ "k8s.io/client-go/testing",
+ "k8s.io/client-go/tools/cache",
+ "k8s.io/client-go/tools/clientcmd",
+ "k8s.io/client-go/tools/portforward",
+ "k8s.io/client-go/transport/spdy",
+ "k8s.io/client-go/util/flowcontrol",
+ "k8s.io/client-go/util/workqueue",
+ "k8s.io/code-generator/cmd/client-gen",
+ "k8s.io/gengo/examples/deepcopy-gen",
+ "k8s.io/kube-openapi/cmd/openapi-gen",
+ "k8s.io/kube-openapi/pkg/common",
+ ]
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index 3b77506fe9..5a5f52b008 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -92,6 +92,10 @@ required = [
name = "github.com/Knetic/govaluate"
branch = "master"
+[[constraint]]
+ name = "github.com/gorilla/mux"
+ version = "v1.7.3"
+
[[constraint]]
name = "github.com/colinmarc/hdfs"
revision = "48eb8d6c34a97ffc73b406356f0f2e1c569b42a5"
@@ -109,7 +113,7 @@ required = [
name = "k8s.io/apimachinery"
[[override]]
- branch = "release-12.0"
+ version = "v12.0.0"
name = "k8s.io/client-go"
[prune]
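The large Gopkg.lock diff above is the mechanical by-product of re-solving dependencies after this Gopkg.toml change with a newer dep release, which adds the digest, pruneopts, and input-imports fields. A sketch of the usual sequence, assuming dep v0.5+ is on the PATH:

```sh
# Re-solve and rewrite Gopkg.lock after editing Gopkg.toml.
dep ensure -v

# Or limit the solve to the projects touched by this change.
dep ensure -update github.com/gorilla/mux k8s.io/client-go
```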
diff --git a/Makefile b/Makefile
index d272d50b30..02583d9876 100644
--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,7 @@ override LDFLAGS += \
# docker image publishing options
DOCKER_PUSH?=true
IMAGE_NAMESPACE?=argoproj
-IMAGE_TAG?=v0.11
+IMAGE_TAG?=v0.12-rc
ifeq (${DOCKER_PUSH},true)
ifndef IMAGE_NAMESPACE
@@ -35,13 +35,13 @@ endif
# Build the project images
.DELETE_ON_ERROR:
-all: sensor-linux sensor-controller-linux gateway-controller-linux gateway-client-linux webhook-linux calendar-linux resource-linux artifact-linux file-linux nats-linux kafka-linux amqp-linux mqtt-linux storage-grid-linux github-linux hdfs-linux gitlab-linux sns-linux sqs-linux pubsub-linux slack-linux
+all: sensor-linux sensor-controller-linux gateway-controller-linux gateway-client-linux webhook-linux calendar-linux resource-linux minio-linux file-linux nats-linux kafka-linux amqp-linux mqtt-linux storage-grid-linux github-linux hdfs-linux gitlab-linux sns-linux sqs-linux pubsub-linux slack-linux
-all-images: sensor-image sensor-controller-image gateway-controller-image gateway-client-image webhook-image calendar-image resource-image artifact-image file-image nats-image kafka-image amqp-image mqtt-image storage-grid-image github-image gitlab-image sns-image pubsub-image hdfs-image sqs-image slack-image
+all-images: sensor-image sensor-controller-image gateway-controller-image gateway-client-image webhook-image calendar-image resource-image minio-image file-image nats-image kafka-image amqp-image mqtt-image storage-grid-image github-image gitlab-image sns-image pubsub-image hdfs-image sqs-image slack-image
all-controller-images: sensor-controller-image gateway-controller-image
-all-core-gateway-images: webhook-image calendar-image artifact-image file-image nats-image kafka-image amqp-image mqtt-image resource-image
+all-core-gateway-images: webhook-image calendar-image minio-image file-image nats-image kafka-image amqp-image mqtt-image resource-image
.PHONY: all clean test
@@ -58,7 +58,7 @@ sensor-image: sensor-linux
# Sensor controller
sensor-controller:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/sensor-controller ./cmd/controllers/sensor
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/sensor-controller ./controllers/sensor/cmd
sensor-controller-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make sensor-controller
@@ -69,7 +69,7 @@ sensor-controller-image: sensor-controller-linux
# Gateway controller
gateway-controller:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/gateway-controller ./cmd/controllers/gateway/main.go
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/gateway-controller ./controllers/gateway/cmd
gateway-controller-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make gateway-controller
@@ -81,196 +81,196 @@ gateway-controller-image: gateway-controller-linux
# Gateway client binary
gateway-client:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/gateway-client ./gateways/cmd/main.go
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/gateway-client ./gateways/client
gateway-client-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make gateway-client
gateway-client-image: gateway-client-linux
- docker build -t $(IMAGE_PREFIX)gateway-client:$(IMAGE_TAG) -f ./gateways/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)gateway-client:$(IMAGE_TAG) -f ./gateways/client/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)gateway-client:$(IMAGE_TAG) ; fi
# gateway binaries
webhook:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/webhook-gateway ./gateways/core/webhook/cmd/
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/webhook-gateway ./gateways/server/webhook/cmd/
webhook-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make webhook
webhook-image: webhook-linux
- docker build -t $(IMAGE_PREFIX)webhook-gateway:$(IMAGE_TAG) -f ./gateways/core/webhook/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)webhook-gateway:$(IMAGE_TAG) -f ./gateways/server/webhook/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)webhook-gateway:$(IMAGE_TAG) ; fi
calendar:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/calendar-gateway ./gateways/core/calendar/cmd/
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/calendar-gateway ./gateways/server/calendar/cmd/
calendar-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make calendar
calendar-image: calendar-linux
- docker build -t $(IMAGE_PREFIX)calendar-gateway:$(IMAGE_TAG) -f ./gateways/core/calendar/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)calendar-gateway:$(IMAGE_TAG) -f ./gateways/server/calendar/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)calendar-gateway:$(IMAGE_TAG) ; fi
resource:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/resource-gateway ./gateways/core/resource/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/resource-gateway ./gateways/server/resource/cmd
resource-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make resource
resource-image: resource-linux
- docker build -t $(IMAGE_PREFIX)resource-gateway:$(IMAGE_TAG) -f ./gateways/core/resource/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)resource-gateway:$(IMAGE_TAG) -f ./gateways/server/resource/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)resource-gateway:$(IMAGE_TAG) ; fi
-artifact:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/artifact-gateway ./gateways/core/artifact/cmd
+minio:
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/minio-gateway ./gateways/server/minio/cmd
-artifact-linux:
- CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make artifact
+minio-linux:
+ CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make minio
-artifact-image: artifact-linux
- docker build -t $(IMAGE_PREFIX)artifact-gateway:$(IMAGE_TAG) -f ./gateways/core/artifact/Dockerfile .
- @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)artifact-gateway:$(IMAGE_TAG) ; fi
+minio-image: minio-linux
+ docker build -t $(IMAGE_PREFIX)minio-gateway:$(IMAGE_TAG) -f ./gateways/server/minio/Dockerfile .
+ @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)minio-gateway:$(IMAGE_TAG) ; fi
file:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/file-gateway ./gateways/core/file/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/file-gateway ./gateways/server/file/cmd
file-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make file
file-image: file-linux
- docker build -t $(IMAGE_PREFIX)file-gateway:$(IMAGE_TAG) -f ./gateways/core/file/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)file-gateway:$(IMAGE_TAG) -f ./gateways/server/file/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)file-gateway:$(IMAGE_TAG) ; fi
#Stream gateways
nats:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/nats-gateway ./gateways/core/stream/nats/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/nats-gateway ./gateways/server/nats/cmd
nats-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make nats
nats-image: nats-linux
- docker build -t $(IMAGE_PREFIX)nats-gateway:$(IMAGE_TAG) -f ./gateways/core/stream/nats/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)nats-gateway:$(IMAGE_TAG) -f ./gateways/server/nats/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)nats-gateway:$(IMAGE_TAG) ; fi
kafka:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/kafka-gateway ./gateways/core/stream/kafka/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/kafka-gateway ./gateways/server/kafka/cmd
kafka-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make kafka
kafka-image: kafka-linux
- docker build -t $(IMAGE_PREFIX)kafka-gateway:$(IMAGE_TAG) -f ./gateways/core/stream/kafka/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)kafka-gateway:$(IMAGE_TAG) -f ./gateways/server/kafka/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)kafka-gateway:$(IMAGE_TAG) ; fi
amqp:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/amqp-gateway ./gateways/core/stream/amqp/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/amqp-gateway ./gateways/server/amqp/cmd
amqp-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make amqp
amqp-image: amqp-linux
- docker build -t $(IMAGE_PREFIX)amqp-gateway:$(IMAGE_TAG) -f ./gateways/core/stream/amqp/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)amqp-gateway:$(IMAGE_TAG) -f ./gateways/server/amqp/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)amqp-gateway:$(IMAGE_TAG) ; fi
mqtt:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/mqtt-gateway ./gateways/core/stream/mqtt/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/mqtt-gateway ./gateways/server/mqtt/cmd
mqtt-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make mqtt
mqtt-image: mqtt-linux
- docker build -t $(IMAGE_PREFIX)mqtt-gateway:$(IMAGE_TAG) -f ./gateways/core/stream/mqtt/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)mqtt-gateway:$(IMAGE_TAG) -f ./gateways/server/mqtt/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)mqtt-gateway:$(IMAGE_TAG) ; fi
# Custom gateways
storage-grid:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/storagegrid-gateway ./gateways/community/storagegrid/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/storagegrid-gateway ./gateways/server/storagegrid/cmd
storage-grid-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make storage-grid
storage-grid-image: storage-grid-linux
- docker build -t $(IMAGE_PREFIX)storage-grid-gateway:$(IMAGE_TAG) -f ./gateways/community/storagegrid/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)storage-grid-gateway:$(IMAGE_TAG) -f ./gateways/server/storagegrid/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)storage-grid-gateway:$(IMAGE_TAG) ; fi
gitlab:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/gitlab-gateway ./gateways/community/gitlab/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/gitlab-gateway ./gateways/server/gitlab/cmd
gitlab-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make gitlab
gitlab-image: gitlab-linux
- docker build -t $(IMAGE_PREFIX)gitlab-gateway:$(IMAGE_TAG) -f ./gateways/community/gitlab/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)gitlab-gateway:$(IMAGE_TAG) -f ./gateways/server/gitlab/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)gitlab-gateway:$(IMAGE_TAG) ; fi
github:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/github-gateway ./gateways/community/github/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/github-gateway ./gateways/server/github/cmd
github-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make github
github-image: github-linux
- docker build -t $(IMAGE_PREFIX)github-gateway:$(IMAGE_TAG) -f ./gateways/community/github/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)github-gateway:$(IMAGE_TAG) -f ./gateways/server/github/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)github-gateway:$(IMAGE_TAG) ; fi
sns:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/aws-sns-gateway ./gateways/community/aws-sns/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/aws-sns-gateway ./gateways/server/aws-sns/cmd
sns-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make sns
sns-image:
- docker build -t $(IMAGE_PREFIX)aws-sns-gateway:$(IMAGE_TAG) -f ./gateways/community/aws-sns/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)aws-sns-gateway:$(IMAGE_TAG) -f ./gateways/server/aws-sns/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)aws-sns-gateway:$(IMAGE_TAG) ; fi
pubsub:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/gcp-pubsub-gateway ./gateways/community/gcp-pubsub/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/gcp-pubsub-gateway ./gateways/server/gcp-pubsub/cmd
pubsub-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make pubsub
pubsub-image: pubsub-linux
- docker build -t $(IMAGE_PREFIX)gcp-pubsub-gateway:$(IMAGE_TAG) -f ./gateways/community/gcp-pubsub/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)gcp-pubsub-gateway:$(IMAGE_TAG) -f ./gateways/server/gcp-pubsub/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)gcp-pubsub-gateway:$(IMAGE_TAG) ; fi
hdfs:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/hdfs-gateway ./gateways/community/hdfs/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/hdfs-gateway ./gateways/server/hdfs/cmd
hdfs-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make hdfs
hdfs-image: hdfs-linux
- docker build -t $(IMAGE_PREFIX)hdfs-gateway:$(IMAGE_TAG) -f ./gateways/community/hdfs/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)hdfs-gateway:$(IMAGE_TAG) -f ./gateways/server/hdfs/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)hdfs-gateway:$(IMAGE_TAG) ; fi
sqs:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/aws-sqs-gateway ./gateways/community/aws-sqs/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/aws-sqs-gateway ./gateways/server/aws-sqs/cmd
sqs-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make sqs
sqs-image: sqs-linux
- docker build -t $(IMAGE_PREFIX)aws-sqs-gateway:$(IMAGE_TAG) -f ./gateways/community/aws-sqs/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)aws-sqs-gateway:$(IMAGE_TAG) -f ./gateways/server/aws-sqs/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)aws-sqs-gateway:$(IMAGE_TAG) ; fi
slack:
- go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/slack-gateway ./gateways/community/slack/cmd
+ go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/slack-gateway ./gateways/server/slack/cmd
slack-linux:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 make slack
slack-image: slack-linux
- docker build -t $(IMAGE_PREFIX)slack-gateway:$(IMAGE_TAG) -f ./gateways/community/slack/Dockerfile .
+ docker build -t $(IMAGE_PREFIX)slack-gateway:$(IMAGE_TAG) -f ./gateways/server/slack/Dockerfile .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)slack-gateway:$(IMAGE_TAG) ; fi
test:
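As a usage note on the renamed targets: the old artifact-* targets are replaced by minio-*, and gateway sources now build from gateways/server/ instead of gateways/core/ and gateways/community/. A sketch of building one of the renamed images locally, reusing the IMAGE_TAG and DOCKER_PUSH variables defined at the top of this Makefile:

```sh
# Build the renamed minio gateway image without pushing it to a registry.
make minio-image IMAGE_TAG=v0.12-rc DOCKER_PUSH=false
```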
diff --git a/README.md b/README.md
index 6a1dacdc19..12e0b386b4 100644
--- a/README.md
+++ b/README.md
@@ -28,17 +28,40 @@ and trigger Kubernetes objects after successful event dependencies resolution.
* Supports [CloudEvents](https://cloudevents.io/) for describing event data and transmission.
* Ability to manage event sources at runtime.
-## Documentation
-To learn more about Argo Events, [go to complete documentation](https://argoproj.github.io/argo-events/)
+## Getting Started
+Follow the [setup](https://argoproj.github.io/argo-events/installation/) instructions to install Argo Events. To see Argo Events in action, follow the
+[getting started](https://argoproj.github.io/argo-events/getting_started/) guide.
+Complete documentation is available at https://argoproj.github.io/argo-events/
+
+[![asciicast](https://asciinema.org/a/AKkYwzEakSUsLqH8mMORA4kza.png)](https://asciinema.org/a/AKkYwzEakSUsLqH8mMORA4kza)
+
+## Available Event Listeners
+1. AMQP
+2. AWS SNS
+3. AWS SQS
+4. Cron Schedules
+5. GCP PubSub
+6. GitHub
+7. GitLab
+8. HDFS
+9. File Based Events
+10. Kafka
+11. Minio
+12. NATS
+13. MQTT
+14. K8s Resources
+15. Slack
+16. NetApp StorageGrid
+17. Webhooks
## Who uses Argo Events?
Organizations below are **officially** using Argo Events. Please send a PR with your organization name if you are using Argo Events.
+* [BioBox Analytics](https://biobox.io)
* [BlackRock](https://www.blackrock.com/)
* [Canva](https://www.canva.com/)
* [Fairwinds](https://fairwinds.com/)
* [Intuit](https://www.intuit.com/)
* [Viaduct](https://www.viaduct.ai/)
-* [BioBox Analytics](https://biobox.io)
## Contribute
Read and abide by the [Argo Events Code of Conduct](https://github.com/argoproj/argo-events/blob/master/CODE_OF_CONDUCT.md)
diff --git a/VERSION b/VERSION
index 0eb41820ee..5473372f79 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.11
\ No newline at end of file
+0.12-rc
\ No newline at end of file
diff --git a/api/event-source.html b/api/event-source.html
new file mode 100644
index 0000000000..0ffead5d76
--- /dev/null
+++ b/api/event-source.html
@@ -0,0 +1,1755 @@
+Packages:
+
+- argoproj.io/v1alpha1
+
+Package v1alpha1 is the v1alpha1 version of the API.
+
+Resource Types: AMQPEventSource, CalendarEventSource, EventSource, EventSourceSpec,
+EventSourceStatus, FileEventSource, GithubEventSource, GitlabEventSource, HDFSEventSource,
+KafkaEventSource, MQTTEventSource, NATSEventsSource, PubSubEventSource, ResourceEventSource,
+ResourceEventType, ResourceFilter, SNSEventSource, SQSEventSource, SlackEventSource,
+StorageGridEventSource, StorageGridFilter.
+
+Each type is documented with a Field/Description table; api/event-source.md carries the same
+reference rendered as Markdown.
+
+Generated with gen-crd-api-reference-docs on git commit 8d85191.
+
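As a quick orientation to the event-source reference above, the sketch below shows how the documented AMQPEventSource fields might appear inside an EventSource manifest. The spec layout (a per-protocol key such as `amqp` holding named event-source entries) follows the example manifests shipped with this repository rather than anything stated in the generated tables, and every concrete value (URL, exchange, routing key, resource names) is illustrative only.

```yaml
# Hypothetical EventSource manifest: only the amqp field names (url, exchangeName,
# exchangeType, routingKey) come from the generated reference; the surrounding
# layout and all concrete values are assumptions for illustration.
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: amqp-event-source        # illustrative name
  namespace: argo-events         # illustrative namespace
spec:
  amqp:
    example:                     # name of this event-source entry
      url: "amqp://amqp.argo-events:5672"   # URL for the rabbitmq service
      exchangeName: test                    # exchange to bind to
      exchangeType: fanout                  # rabbitmq exchange type
      routingKey: hello                     # routing key for bindings
```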
diff --git a/api/event-source.md b/api/event-source.md
new file mode 100644
index 0000000000..06589b34aa
--- /dev/null
+++ b/api/event-source.md
@@ -0,0 +1,3489 @@
+# argoproj.io/v1alpha1
+
+Package v1alpha1 is the v1alpha1 version of the API.
+
+Resource Types:
+
+## AMQPEventSource
+AMQPEventSource refers to an event-source for AMQP stream events. (Appears on: EventSourceSpec.)
+
+- `url` (*string*): URL for rabbitmq service
+- `exchangeName` (*string*): ExchangeName is the exchange name. For more information, visit https://www.rabbitmq.com/tutorials/amqp-concepts.html
+- `exchangeType` (*string*): ExchangeType is rabbitmq exchange type
+- `routingKey` (*string*): Routing key for bindings
+- `connectionBackoff` (*github.com/argoproj/argo-events/common.Backoff*): (Optional) Backoff holds parameters applied to connection.
+
+## CalendarEventSource
+CalendarEventSource describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed. Schedule takes precedence over interval; interval takes precedence over recurrence. (Appears on: EventSourceSpec.)
+
+- `schedule` (*string*): Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron
+- `interval` (*string*): Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h…
+- `exclusionDates` (*[]string*): ExclusionDates defines the list of DATE-TIME exceptions for recurring events.
+- `timezone` (*string*): (Optional) Timezone in which to run the schedule
+- `userPayload` (*encoding/json.RawMessage*): (Optional) UserPayload will be sent to sensor as extra data once the event is triggered
+
+## EventSource
+EventSource is the definition of a eventsource resource.
+
+- `metadata` (*Kubernetes meta/v1.ObjectMeta*): Refer to the Kubernetes API documentation for the fields of the metadata field.
+- `status` (*EventSourceStatus*)
+- `spec` (*EventSourceSpec*)
+
+## EventSourceSpec
+EventSourceSpec refers to specification of event-source resource. (Appears on: EventSource.)
+
+## EventSourceStatus
+EventSourceStatus holds the status of the event-source resource. (Appears on: EventSource.)
+
+- `createdAt` (*Kubernetes meta/v1.Time*)
+
+## FileEventSource
+FileEventSource describes an event-source for file related events. (Appears on: EventSourceSpec.)
+
+- `eventType` (*string*): Type of file operations to watch. Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information
+- `watchPathConfig` (*github.com/argoproj/argo-events/gateways/server/common/fsevent.WatchPathConfig*): WatchPathConfig contains configuration about the file path to watch
+
+## GithubEventSource
+GithubEventSource refers to event-source for github related events. (Appears on: EventSourceSpec.)
+
+- `id` (*int64*): Id is the webhook's id
+- `webhook` (*github.com/argoproj/argo-events/gateways/server/common/webhook.Context*): Webhook refers to the configuration required to run a http server
+- `owner` (*string*): Owner refers to GitHub owner name i.e. argoproj
+- `repository` (*string*): Repository refers to GitHub repo name i.e. argo-events
+- `events` (*[]string*): Events refer to Github events to which the gateway will subscribe
+- `apiToken` (*Kubernetes core/v1.SecretKeySelector*): APIToken refers to a K8s secret containing github api token
+- `webhookSecret` (*Kubernetes core/v1.SecretKeySelector*): (Optional) WebhookSecret refers to K8s secret containing GitHub webhook secret. https://developer.github.com/webhooks/securing/
+- `insecure` (*bool*): Insecure tls verification
+- `active` (*bool*): (Optional) Active refers to status of the webhook for event deliveries. https://developer.github.com/webhooks/creating/#active
+- `contentType` (*string*): ContentType of the event delivery
+- `githubBaseURL` (*string*): (Optional) GitHub base URL (for GitHub Enterprise)
+- `githubUploadURL` (*string*): (Optional) GitHub upload URL (for GitHub Enterprise)
+- `namespace` (*string*): Namespace refers to Kubernetes namespace which is used to retrieve webhook secret and api token from.
+- `deleteHookOnFinish` (*bool*): (Optional) DeleteHookOnFinish determines whether to delete the GitHub hook for the repository once the event source is stopped.
+
+## GitlabEventSource
+GitlabEventSource refers to event-source related to Gitlab events. (Appears on: EventSourceSpec.)
+
+- `webhook` (*github.com/argoproj/argo-events/gateways/server/common/webhook.Context*): Webhook holds configuration to run a http server
+- `projectId` (*string*): ProjectId is the id of project for which integration needs to setup
+- `event` (*string*): Event is a gitlab event to listen to. Refer https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794.
+- `accessToken` (*Kubernetes core/v1.SecretKeySelector*): AccessToken is reference to k8 secret which holds the gitlab api access information
+- `enableSSLVerification` (*bool*): (Optional) EnableSSLVerification to enable ssl verification
+- `gitlabBaseURL` (*string*): GitlabBaseURL is the base URL for API requests to a custom endpoint
+- `namespace` (*string*): Namespace refers to Kubernetes namespace which is used to retrieve access token from.
+- `deleteHookOnFinish` (*bool*): (Optional) DeleteHookOnFinish determines whether to delete the GitLab hook for the project once the event source is stopped.
+
+## HDFSEventSource
+HDFSEventSource refers to event-source for HDFS related events. (Appears on: EventSourceSpec.)
+
+- `WatchPathConfig` (*github.com/argoproj/argo-events/gateways/server/common/fsevent.WatchPathConfig*): Members of WatchPathConfig are embedded into this type.
+- `type` (*string*): Type of file operations to watch
+- `checkInterval` (*string*): CheckInterval is a string that describes an interval duration to check the directory state, e.g. 1s, 30m, 2h… (defaults to 1m)
+- `addresses` (*[]string*): Addresses is accessible addresses of HDFS name nodes
+- `hdfsUser` (*string*): HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.
+- `krbCCacheSecret` (*Kubernetes core/v1.SecretKeySelector*): KrbCCacheSecret is the secret selector for Kerberos ccache. Either ccache or keytab can be set to use Kerberos.
+- `krbKeytabSecret` (*Kubernetes core/v1.SecretKeySelector*): KrbKeytabSecret is the secret selector for Kerberos keytab. Either ccache or keytab can be set to use Kerberos.
+- `krbUsername` (*string*): KrbUsername is the Kerberos username used with Kerberos keytab. It must be set if keytab is used.
+- `krbRealm` (*string*): KrbRealm is the Kerberos realm used with Kerberos keytab. It must be set if keytab is used.
+- `krbConfigConfigMap` (*Kubernetes core/v1.ConfigMapKeySelector*): KrbConfig is the configmap selector for Kerberos config as string. It must be set if either ccache or keytab is used.
+- `krbServicePrincipalName` (*string*): KrbServicePrincipalName is the principal name of Kerberos service. It must be set if either ccache or keytab is used.
+- `namespace` (*string*): Namespace refers to Kubernetes namespace which is used to retrieve cache secret and keytab secret from.
+
+## KafkaEventSource
+KafkaEventSource refers to event-source for Kafka related events. (Appears on: EventSourceSpec.)
+
+- `url` (*string*): URL to kafka cluster
+- `partition` (*string*): Partition name
+- `topic` (*string*): Topic name
+- `connectionBackoff` (*github.com/argoproj/argo-events/common.Backoff*): Backoff holds parameters applied to connection.
+
+## MQTTEventSource
+MQTTEventSource refers to event-source for MQTT related events. (Appears on: EventSourceSpec.)
+
+- `url` (*string*): URL to connect to broker
+- `topic` (*string*): Topic name
+- `clientId` (*string*): ClientID is the id of the client
+- `connectionBackoff` (*github.com/argoproj/argo-events/common.Backoff*): ConnectionBackoff holds backoff applied to connection.
+
+## NATSEventsSource
+NATSEventSource refers to event-source for NATS related events. (Appears on: EventSourceSpec.)
+
+- `url` (*string*): URL to connect to NATS cluster
+- `subject` (*string*): Subject holds the name of the subject onto which messages are published
+- `connectionBackoff` (*github.com/argoproj/argo-events/common.Backoff*): ConnectionBackoff holds backoff applied to connection.
+
+## PubSubEventSource
+PubSubEventSource refers to event-source for GCP PubSub related events. (Appears on: EventSourceSpec.)
+
+- `projectID` (*string*): ProjectID is the unique identifier for your project on GCP
+- `topicProjectID` (*string*): TopicProjectID identifies the project where the topic should exist or be created (assumed to be the same as ProjectID by default)
+- `topic` (*string*): Topic on which a subscription will be created
+- `credentialsFile` (*string*): CredentialsFile is the file that contains credentials to authenticate for GCP
+- `deleteSubscriptionOnFinish` (*bool*): (Optional) DeleteSubscriptionOnFinish determines whether to delete the GCP PubSub subscription once the event source is stopped.
+
+## ResourceEventSource
+ResourceEventSource refers to a event-source for K8s resource related events. (Appears on: EventSourceSpec.)
+
+- `namespace` (*string*): Namespace where resource is deployed
+- `filter` (*ResourceFilter*): (Optional) Filter is applied on the metadata of the resource
+- `GroupVersionResource` (*Kubernetes meta/v1.GroupVersionResource*): Members of GroupVersionResource are embedded into this type. Group of the resource
+- `eventType` (*ResourceEventType*): (Optional) Type is the event type. If not provided, the gateway will watch all events for a resource.
+
+## ResourceEventType (string alias)
+ResourceEventType is the type of event for the K8s resource mutation. (Appears on: ResourceEventSource.)
+
+## ResourceFilter
+ResourceFilter contains K8 ObjectMeta information to further filter resource event objects. (Appears on: ResourceEventSource.)
+
+- `prefix` (*string*): (Optional)
+- `labels` (*map[string]string*): (Optional)
+- `fields` (*map[string]string*): (Optional)
+- `createdBy` (*Kubernetes meta/v1.Time*): (Optional)
+
+## SNSEventSource
+SNSEventSource refers to event-source for AWS SNS related events. (Appears on: EventSourceSpec.)
+
+- `webhook` (*github.com/argoproj/argo-events/gateways/server/common/webhook.Context*): Webhook configuration for http server
+- `topicArn` (*string*): TopicArn
+- `accessKey` (*Kubernetes core/v1.SecretKeySelector*): AccessKey refers K8 secret containing aws access key
+- `secretKey` (*Kubernetes core/v1.SecretKeySelector*): SecretKey refers K8 secret containing aws secret key
+- `namespace` (*string*): (Optional) Namespace refers to Kubernetes namespace to read access related secret from.
+- `region` (*string*): Region is AWS region
+
+## SQSEventSource
+SQSEventSource refers to event-source for AWS SQS related events. (Appears on: EventSourceSpec.)
+
+- `accessKey` (*Kubernetes core/v1.SecretKeySelector*): AccessKey refers K8 secret containing aws access key
+- `secretKey` (*Kubernetes core/v1.SecretKeySelector*): SecretKey refers K8 secret containing aws secret key
+- `region` (*string*): Region is AWS region
+- `queue` (*string*): Queue is AWS SQS queue to listen to for messages
+- `waitTimeSeconds` (*int64*): WaitTimeSeconds is the duration (in seconds) for which the call waits for a message to arrive in the queue before returning.
+- `namespace` (*string*): (Optional) Namespace refers to Kubernetes namespace to read access related secret from.
+
+## SlackEventSource
+SlackEventSource refers to event-source for Slack related events. (Appears on: EventSourceSpec.)
+
+- `signingSecret` (*Kubernetes core/v1.SecretKeySelector*): Slack App signing secret
+- `token` (*Kubernetes core/v1.SecretKeySelector*): Token for URL verification handshake
+- `webhook` (*github.com/argoproj/argo-events/gateways/server/common/webhook.Context*): Webhook holds configuration for a REST endpoint
+- `namespace` (*string*): Namespace refers to Kubernetes namespace which is used to retrieve token and signing secret from.
+
+## StorageGridEventSource
+StorageGridEventSource refers to event-source for StorageGrid related events. (Appears on: EventSourceSpec.)
+
+## StorageGridFilter
+Filter represents filters to apply to bucket notifications for specifying constraints on objects. (Appears on: StorageGridEventSource.)
+
+- `prefix` (*string*)
+- `suffix` (*string*)
+
+*Generated with gen-crd-api-reference-docs on git commit 8d85191.*
+
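For a source that needs credentials, the sketch below shows how the GithubEventSource fields above could be combined with Kubernetes `SecretKeySelector` references. Only the field names come from the reference tables; the spec layout (a `github` key holding named entries) mirrors the examples in this repository, the `webhook` sub-fields are not expanded in these tables and are assumptions, and every concrete value (names, namespace, port) is made up for illustration.

```yaml
# Hypothetical GithubEventSource entry; field names follow the reference above,
# the webhook sub-fields and all concrete values are illustrative assumptions.
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: github-event-source
  namespace: argo-events
spec:
  github:
    example:
      id: 1234                      # webhook id
      owner: argoproj               # GitHub owner name
      repository: argo-events       # GitHub repo name
      events: ["push", "pull_request"]
      apiToken:                     # Kubernetes core/v1.SecretKeySelector
        name: github-access         # illustrative secret name
        key: token
      webhookSecret:                # Kubernetes core/v1.SecretKeySelector
        name: github-access
        key: secret
      insecure: false
      active: true
      contentType: json
      namespace: argo-events        # namespace holding the secrets
      webhook:                      # webhook.Context; sub-fields assumed
        endpoint: /push
        port: "12000"
```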
diff --git a/api/gateway.html b/api/gateway.html
new file mode 100644
index 0000000000..965c9ad323
--- /dev/null
+++ b/api/gateway.html
@@ -0,0 +1,731 @@
+Packages:
+
+- argoproj.io/v1alpha1
+
+Package v1alpha1 is the v1alpha1 version of the API.
+
+Resource Types: EventSourceRef, Gateway, GatewayNotificationWatcher, GatewayResource,
+GatewaySpec, GatewayStatus, NodePhase, NodeStatus, NotificationWatchers,
+SensorNotificationWatcher.
+
+Each type is documented with a Field/Description table; api/gateway.md carries the same
+reference rendered as Markdown.
+
+Generated with gen-crd-api-reference-docs on git commit 8d85191.
+
diff --git a/api/gateway.md b/api/gateway.md
new file mode 100644
index 0000000000..20df87387d
--- /dev/null
+++ b/api/gateway.md
@@ -0,0 +1,1451 @@
+# argoproj.io/v1alpha1
+
+Package v1alpha1 is the v1alpha1 version of the API.
+
+Resource Types:
+
+## EventSourceRef
+EventSourceRef holds information about the EventSourceRef custom resource. (Appears on: GatewaySpec.)
+
+- `name` (*string*): Name of the event source
+- `namespace` (*string*): (Optional) Namespace of the event source. Default value is the namespace where referencing gateway is deployed
+
+## Gateway
+Gateway is the definition of a gateway resource.
+
+- `metadata` (*Kubernetes meta/v1.ObjectMeta*): Refer to the Kubernetes API documentation for the fields of the metadata field.
+- `status` (*GatewayStatus*)
+- `spec` (*GatewaySpec*): The gateway specification; its fields are listed under GatewaySpec below.
+
+## GatewayNotificationWatcher
+GatewayNotificationWatcher is the gateway interested in listening to notifications from this gateway. (Appears on: NotificationWatchers.)
+
+- `name` (*string*): Name is the gateway name
+- `port` (*string*): Port is http server port on which gateway is running
+- `endpoint` (*string*): Endpoint is REST API endpoint to post event to. Events are sent using HTTP POST method to this endpoint.
+- `namespace` (*string*): Namespace of the gateway
+
+## GatewayResource
+GatewayResource holds the metadata about the gateway resources. (Appears on: GatewayStatus.)
+
+- `deployment` (*Kubernetes meta/v1.ObjectMeta*): Metadata of the deployment for the gateway
+- `service` (*Kubernetes meta/v1.ObjectMeta*): (Optional) Metadata of the service for the gateway
+
+## GatewaySpec
+GatewaySpec represents gateway specifications. (Appears on: Gateway.)
+
+- `template` (*Kubernetes core/v1.PodTemplateSpec*): Template is the pod specification for the gateway. Refer https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#pod-v1-core
+- `eventSourceRef` (*EventSourceRef*): EventSourceRef refers to event-source that stores event source configurations for the gateway
+- `type` (*Argo Events common.EventSourceType*): Type is the type of gateway. Used as metadata.
+- `service` (*Kubernetes core/v1.Service*): Service is the specifications of the service to expose the gateway. Refer https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#service-v1-core
+- `watchers` (*NotificationWatchers*): Watchers are components which are interested listening to notifications from this gateway. These only need to be specified when gateway dispatch mechanism is through HTTP POST notifications. In future, support for NATS, KAFKA will be added as a means to dispatch notifications in which case specifying watchers would be unnecessary.
+- `processorPort` (*string*): Port on which the gateway event source processor is running on.
+- `eventProtocol` (*Argo Events common.EventProtocol*): EventProtocol is the underlying protocol used to send events from gateway to watchers (components interested in listening to event from this gateway)
+- `replica` (*int*): Replica is the gateway deployment replicas
+
+## GatewayStatus
+GatewayStatus contains information about the status of a gateway. (Appears on: Gateway.)
+
+- `phase` (*NodePhase*): Phase is the high-level summary of the gateway
+- `startedAt` (*Kubernetes meta/v1.Time*): StartedAt is the time at which this gateway was initiated
+- `message` (*string*): Message is a human readable string indicating details about a gateway in its phase
+- `nodes` (*map[string]github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.NodeStatus*): Nodes is a mapping between a node ID and the node's status; it records the states for the configurations of gateway.
+- `resources` (*GatewayResource*): Resources refers to the metadata about the gateway resources
+
+## NodePhase (string alias)
+NodePhase is the label for the condition of a node. (Appears on: GatewayStatus, NodeStatus.)
+
+## NodeStatus
+NodeStatus describes the status for an individual node in the gateway configurations. A single node can represent one configuration. (Appears on: GatewayStatus.)
+
+- `id` (*string*): ID is a unique identifier of a node within a sensor. It is a hash of the node name
+- `name` (*string*): Name is a unique name in the node tree used to generate the node ID
+- `displayName` (*string*): DisplayName is the human readable representation of the node
+- `phase` (*NodePhase*): Phase of the node
+- `startedAt` (*Kubernetes meta/v1.MicroTime*): StartedAt is the time at which this node started
+- `message` (*string*): Message store data or something to save for configuration
+- `updateTime` (*Kubernetes meta/v1.MicroTime*): UpdateTime is the time when node (gateway configuration) was updated
+
+## NotificationWatchers
+NotificationWatchers are components which are interested listening to notifications from this gateway. (Appears on: GatewaySpec.)
+
+- `gateways` (*[]GatewayNotificationWatcher*): Gateways is the list of gateways interested in listening to notifications from this gateway
+- `sensors` (*[]SensorNotificationWatcher*): Sensors is the list of sensors interested in listening to notifications from this gateway
+
+## SensorNotificationWatcher
+SensorNotificationWatcher is the sensor interested in listening to notifications from this gateway. (Appears on: NotificationWatchers.)
+
+- `name` (*string*): Name is the name of the sensor
+- `namespace` (*string*): Namespace of the sensor
+
+*Generated with gen-crd-api-reference-docs on git commit 8d85191.*
+
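To connect the two references, here is a sketch of a Gateway that points at an EventSource via `eventSourceRef` and dispatches events to a sensor watcher over HTTP. The spec field names (`type`, `processorPort`, `eventProtocol`, `eventSourceRef`, `replica`, `template`, `watchers`) come from the GatewaySpec table above; the `eventProtocol` sub-structure is an assumption, since common.EventProtocol is not expanded in these tables, and the template body plus all concrete values, image names, and ports are illustrative only.

```yaml
# Hypothetical Gateway manifest; spec field names follow the reference above,
# while the eventProtocol sub-fields and all concrete values are assumptions.
apiVersion: argoproj.io/v1alpha1
kind: Gateway
metadata:
  name: amqp-gateway
  namespace: argo-events
spec:
  type: amqp                        # gateway type, used as metadata
  processorPort: "9330"             # port of the gateway event source processor
  eventProtocol:
    type: HTTP                      # protocol used to send events to watchers (assumed shape)
  eventSourceRef:
    name: amqp-event-source         # EventSource holding the event-source configurations
  replica: 1
  template:                         # pod spec for the gateway (abbreviated)
    spec:
      containers:
        - name: gateway-client
          image: argoproj/gateway-client   # illustrative image
  watchers:
    sensors:
      - name: amqp-sensor           # sensor notified over HTTP POST
```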
diff --git a/api/generate.sh b/api/generate.sh
new file mode 100644
index 0000000000..5c50365bfd
--- /dev/null
+++ b/api/generate.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env sh
+
+pandoc --from markdown --to gfm event-source.html > event-source.md
+pandoc --from markdown --to gfm gateway.html > gateway.md
+pandoc --from markdown --to gfm sensor.html > sensor.md
diff --git a/api/sensor.html b/api/sensor.html
new file mode 100644
index 0000000000..d82d21b636
--- /dev/null
+++ b/api/sensor.html
@@ -0,0 +1,1816 @@
+Packages:
+
+- argoproj.io/v1alpha1
+
+Package v1alpha1 is the v1alpha1 version of the API.
+
+Resource Types:
+
+ArtifactLocation (Appears on: TriggerTemplate)
+ArtifactLocation describes the source location for an external minio.
+- s3 (Argo Events common.S3Artifact): S3 compliant minio
+- inline (string): Inline minio is embedded in sensor spec as a string
+- file (FileArtifact): File minio is minio stored in a file
+- url (URLArtifact): URL to fetch the minio from
+- configmap (ConfigmapArtifact): Configmap that stores the minio
+- git (GitArtifact): Git repository hosting the minio
+- resource (Kubernetes meta/v1/unstructured.Unstructured): Resource is generic template for K8s resource
+
+Backoff (Appears on: TriggerPolicy)
+Backoff for an operation.
+- duration (time.Duration): Duration is the duration in nanoseconds
+- factor (float64): Duration is multiplied by factor each iteration
+- jitter (float64): The amount of jitter applied each iteration
+- steps (int): Exit with error after this many steps
+
+ConfigmapArtifact (Appears on: ArtifactLocation)
+ConfigmapArtifact contains information about minio in k8 configmap.
+- name (string): Name of the configmap
+- namespace (string): Namespace where configmap is deployed
+- key (string): Key within configmap data which contains trigger resource definition
+
+DataFilter (Appears on: EventDependencyFilter)
+DataFilter describes constraints and filters for event data. Regular Expressions are purposefully not a feature as they are overkill for our uses here. See Rob Pike's Post: https://commandcenter.blogspot.com/2011/08/regular-expressions-in-lexing-and.html
+- path (string): Path is the JSONPath of the event's (JSON decoded) data key. Path is a series of keys separated by a dot. A key may contain wildcard characters '*' and '?'. To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\'. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.
+- type (JSONType): Type contains the JSON type of the data
+- value ([]string): Value is the allowed string values for this key. Booleans are passed using strconv.ParseBool(). Numbers are parsed as float64 using strconv.ParseFloat(). Strings are taken as is. Nils this value is ignored
+
+DependencyGroup (Appears on: SensorSpec)
+DependencyGroup is the group of dependencies.
+- name (string): Name of the group
+- dependencies ([]string): Dependencies of events
+
+EventDependency (Appears on: SensorSpec)
+EventDependency describes a dependency.
+- name (string): Name is a unique name of this dependency
+- filters (EventDependencyFilter): Filters and rules governing tolerations of success and constraints on the context and data of an event
+- connected (bool): Connected tells if subscription is already setup in case of nats protocol.
+
+EventDependencyFilter (Appears on: EventDependency)
+EventDependencyFilter defines filters and constraints for a event.
+- name (string): Name is the name of event filter
+- time (TimeFilter): Time filter on the event with escalation
+- context (Argo Events common.EventContext): Context filter constraints with escalation
+- data ([]DataFilter): Data filter constraints with escalation
+
+FileArtifact (Appears on: ArtifactLocation)
+FileArtifact contains information about an minio in a filesystem.
+- path (string)
+
+GitArtifact (Appears on: ArtifactLocation)
+GitArtifact contains information about an minio stored in git.
+- url (string): Git URL
+- cloneDirectory (string): Directory to clone the repository. We clone complete directory because GitArtifact is not limited to any specific Git service providers. Hence we don't use any specific git provider client.
+- creds (GitCreds): (Optional) Creds contain reference to git username and password
+- namespace (string): (Optional) Namespace where creds are stored.
+- sshKeyPath (string): (Optional) SSHKeyPath is path to your ssh key path. Use this if you don't want to provide username and password. ssh key path must be mounted in sensor pod.
+- filePath (string): Path to file that contains trigger resource definition
+- branch (string): (Optional) Branch to use to pull trigger resource
+- tag (string): (Optional) Tag to use to pull trigger resource
+- ref (string): (Optional) Ref to use to pull trigger resource. Will result in a shallow clone and fetch.
+- remote (GitRemoteConfig): (Optional) Remote to manage set of tracked repositories. Defaults to "origin". Refer https://git-scm.com/docs/git-remote
+
+GitCreds (Appears on: GitArtifact)
+GitCreds contain reference to git username and password.
+
+GitRemoteConfig (Appears on: GitArtifact)
+GitRemoteConfig contains the configuration of a Git remote.
+- name (string): Name of the remote to fetch from.
+- urls ([]string): URLs the URLs of a remote repository. It must be non-empty. Fetch will always use the first URL, while push will use all of them.
+
+JSONType (string alias) (Appears on: DataFilter)
+JSONType contains the supported JSON types for data filtering.
+
+NodePhase (string alias) (Appears on: NodeStatus, SensorStatus)
+NodePhase is the label for the condition of a node.
+
+NodeStatus (Appears on: SensorStatus)
+NodeStatus describes the status for an individual node in the sensor's FSM. A single node can represent the status for event or a trigger.
+- id (string): ID is a unique identifier of a node within a sensor. It is a hash of the node name
+- name (string): Name is a unique name in the node tree used to generate the node ID
+- displayName (string): DisplayName is the human readable representation of the node
+- type (NodeType): Type is the type of the node
+- phase (NodePhase): Phase of the node
+- startedAt (Kubernetes meta/v1.MicroTime): StartedAt is the time at which this node started
+- completedAt (Kubernetes meta/v1.MicroTime): CompletedAt is the time at which this node completed
+- message (string): store data or something to save for event notifications or trigger events
+- event (Argo Events common.Event): Event stores the last seen event for this node
+
+NodeType (string alias) (Appears on: NodeStatus)
+NodeType is the type of a node.
+
+NotificationType (string alias)
+NotificationType represent a type of notifications that are handled by a sensor.
+
+Sensor
+Sensor is the definition of a sensor resource.
+- metadata (Kubernetes meta/v1.ObjectMeta): Refer to the Kubernetes API documentation for the fields of the metadata field.
+- spec (SensorSpec): The sensor specification; its fields are listed under SensorSpec below.
+- status (SensorStatus)
+
+SensorResources (Appears on: SensorStatus)
+SensorResources holds the metadata of the resources created for the sensor.
+
+SensorSpec (Appears on: Sensor)
+SensorSpec represents desired sensor state.
+- dependencies ([]EventDependency): Dependencies is a list of the events that this sensor is dependent on.
+- triggers ([]Trigger): Triggers is a list of the things that this sensor evokes. These are the outputs from this sensor.
+- template (Kubernetes core/v1.PodTemplateSpec): Template contains sensor pod specification. For more information, read https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#pod-v1-core
+- eventProtocol (Argo Events common.EventProtocol): EventProtocol is the protocol through which sensor receives events from gateway
+- circuit (string): Circuit is a boolean expression of dependency groups
+- dependencyGroups ([]DependencyGroup): DependencyGroups is a list of the groups of events.
+- errorOnFailedRound (bool): ErrorOnFailedRound if set to true, marks sensor state as error if the previous trigger round fails. Once sensor state is set to error, no further triggers will be processed.
+
+SensorStatus (Appears on: Sensor)
+SensorStatus contains information about the status of a sensor.
+- phase (NodePhase): Phase is the high-level summary of the sensor
+ |
+
+
+
+startedAt
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+ StartedAt is the time at which this sensor was initiated
+ |
+
+
+
+completedAt
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+ CompletedAt is the time at which this sensor was completed
+ |
+
+
+
+message
+
+string
+
+ |
+
+ Message is a human readable string indicating details about a sensor in its phase
+ |
+
+
+
+nodes
+
+
+map[string]github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.NodeStatus
+
+
+ |
+
+ Nodes is a mapping between a node ID and the node’s status
+it records the states for the FSM of this sensor.
+ |
+
+
+
+triggerCycleCount
+
+int32
+
+ |
+
+ TriggerCycleCount is the count of sensor’s trigger cycle runs.
+ |
+
+
+
+triggerCycleStatus
+
+
+TriggerCycleState
+
+
+ |
+
+ TriggerCycleState is the status from last cycle of triggers execution.
+ |
+
+
+
+lastCycleTime
+
+
+Kubernetes meta/v1.Time
+
+
+ |
+
+ LastCycleTime is the time when last trigger cycle completed
+ |
+
+
+
+resources
+
+
+SensorResources
+
+
+ |
+
+ Resources refers to metadata of the resources created for the sensor
+ |
+
+
+
+TimeFilter
+
+
+(Appears on:
+EventDependencyFilter)
+
+
+
TimeFilter describes a window in time.
+DataFilters out event events that occur outside the time limits.
+In other words, only events that occur after Start and before Stop
+will pass this filter.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+start
+
+string
+
+ |
+
+ Start is the beginning of a time window.
+Before this time, events for this event are ignored and
+format is hh:mm:ss
+ |
+
+
+
+stop
+
+string
+
+ |
+
+ StopPattern is the end of a time window.
+After this time, events for this event are ignored and
+format is hh:mm:ss
+ |
+
+
+
+Trigger
+
+
+(Appears on:
+SensorSpec)
+
+
+
Trigger is an action taken, output produced, an event created, a message sent
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+template
+
+
+TriggerTemplate
+
+
+ |
+
+ Template describes the trigger specification.
+ |
+
+
+
+templateParameters
+
+
+[]TriggerParameter
+
+
+ |
+
+ TemplateParameters is the list of resource parameters to pass to the template object
+ |
+
+
+
+resourceParameters
+
+
+[]TriggerParameter
+
+
+ |
+
+ ResourceParameters is the list of resource parameters to pass to resolved resource object in template object
+ |
+
+
+
+policy
+
+
+TriggerPolicy
+
+
+ |
+
+ Policy to configure backoff and execution criteria for the trigger
+ |
+
+
+
+TriggerCondition
+
+
+(Appears on:
+TriggerTemplate)
+
+
+
TriggerCondition describes condition which must be satisfied in order to execute a trigger.
+Depending upon condition type, status of dependency groups is used to evaluate the result.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+any
+
+[]string
+
+ |
+
+ Any acts as a OR operator between dependencies
+ |
+
+
+
+all
+
+[]string
+
+ |
+
+ All acts as a AND operator between dependencies
+ |
+
+
+
+TriggerCycleState
+(string
alias)
+
+(Appears on:
+SensorStatus)
+
+
+
TriggerCycleState is the label for the state of the trigger cycle
+
+TriggerParameter
+
+
+(Appears on:
+Trigger)
+
+
+
TriggerParameter indicates a passed parameter to a service template
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+src
+
+
+TriggerParameterSource
+
+
+ |
+
+ Src contains a source reference to the value of the parameter from a event event
+ |
+
+
+
+dest
+
+string
+
+ |
+
+ Dest is the JSONPath of a resource key.
+A path is a series of keys separated by a dot. The colon character can be escaped with ‘.’
+The -1 key can be used to append a value to an existing array.
+See https://github.com/tidwall/sjson#path-syntax for more information about how this is used.
+ |
+
+
+
+operation
+
+
+TriggerParameterOperation
+
+
+ |
+
+ Operation is what to do with the existing value at Dest, whether to
+‘prepend’, ‘overwrite’, or ‘append’ it.
+ |
+
+
+
+TriggerParameterOperation
+(string
alias)
+
+(Appears on:
+TriggerParameter)
+
+
+
TriggerParameterOperation represents how to set a trigger destination
+resource key
+
+TriggerParameterSource
+
+
+(Appears on:
+TriggerParameter)
+
+
+
TriggerParameterSource defines the source for a parameter from a event event
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+event
+
+string
+
+ |
+
+ Event is the name of the event for which to retrieve this event
+ |
+
+
+
+path
+
+string
+
+ |
+
+ Path is the JSONPath of the event’s (JSON decoded) data key
+Path is a series of keys separated by a dot. A key may contain wildcard characters ‘*’ and ‘?’.
+To access an array value use the index as the key. The dot and wildcard characters can be escaped with ‘\’.
+See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.
+ |
+
+
+
+value
+
+string
+
+ |
+
+ Value is the default literal value to use for this parameter source
+This is only used if the path is invalid.
+If the path is invalid and this is not defined, this param source will produce an error.
+ |
+
+
+
+TriggerPolicy
+
+
+(Appears on:
+Trigger)
+
+
+
TriggerPolicy dictates the policy for the trigger retries
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+backoff
+
+
+Backoff
+
+
+ |
+
+ Backoff before checking resource state
+ |
+
+
+
+state
+
+
+TriggerStateLabels
+
+
+ |
+
+ State refers to labels used to check the resource state
+ |
+
+
+
+errorOnBackoffTimeout
+
+bool
+
+ |
+
+ ErrorOnBackoffTimeout determines whether sensor should transition to error state if the backoff times out and yet the resource neither transitioned into success or failure.
+ |
+
+
+
+TriggerStateLabels
+
+
+(Appears on:
+TriggerPolicy)
+
+
+
TriggerStateLabels defines the labels used to decide if a resource is in success or failure state.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+success
+
+map[string]string
+
+ |
+
+ Success defines labels required to identify a resource in success state
+ |
+
+
+
+failure
+
+map[string]string
+
+ |
+
+ Failure defines labels required to identify a resource in failed state
+ |
+
+
+
+TriggerTemplate
+
+
+(Appears on:
+Trigger)
+
+
+
TriggerTemplate is the template that describes trigger specification.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+string
+
+ |
+
+ Name is a unique name of the action to take
+ |
+
+
+
+when
+
+
+TriggerCondition
+
+
+ |
+
+ When is the condition to execute the trigger
+ |
+
+
+
+GroupVersionResource
+
+
+Kubernetes meta/v1.GroupVersionResource
+
+
+ |
+
+
+(Members of GroupVersionResource are embedded into this type.)
+
+The unambiguous kind of this object - used in order to retrieve the appropriate kubernetes api client for this resource
+ |
+
+
+
+source
+
+
+ArtifactLocation
+
+
+ |
+
+ Source of the K8 resource file(s)
+ |
+
+
+
+URLArtifact
+
+
+(Appears on:
+ArtifactLocation)
+
+
+
URLArtifact contains information about an minio at an http endpoint.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+path
+
+string
+
+ |
+
+ Path is the complete URL
+ |
+
+
+
+verifyCert
+
+bool
+
+ |
+
+ VerifyCert decides whether the connection is secure or not
+ |
+
+
+
+
+
+Generated with gen-crd-api-reference-docs
+on git commit 8d85191
.
+
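To make the filter reference above easier to apply, here is a short, hypothetical excerpt of a sensor manifest that wires the `time` and `data` filters documented in this reference into a single event dependency. It is a minimal sketch; the dependency name, time window, and payload key are invented for illustration and are not defined anywhere in this change.

```yaml
# Hypothetical event dependency combining a TimeFilter and a DataFilter.
dependencies:
  - name: webhook-gateway:example        # assumed "<gateway>:<event-source>" name
    filters:
      name: business-hours-and-action
      time:
        start: "09:00:00"   # events before this time are ignored (hh:mm:ss)
        stop: "17:00:00"    # events after this time are ignored (hh:mm:ss)
      data:
        - path: action       # gjson path into the event's JSON-decoded payload
          type: string
          value:
            - opened
```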
diff --git a/api/sensor.md b/api/sensor.md
new file mode 100644
index 0000000000..bef98f80db
--- /dev/null
+++ b/api/sensor.md
@@ -0,0 +1,3661 @@
+# Packages
+
+- argoproj.io/v1alpha1
+
+# argoproj.io/v1alpha1
+
+Package v1alpha1 is the v1alpha1 version of the API.
+
+Resource Types:
+
+## ArtifactLocation
+
+(Appears on: TriggerTemplate)
+
+ArtifactLocation describes the source location for an external minio.
+
+| Field | Description |
+| --- | --- |
+| `s3` (Argo Events common.S3Artifact) | S3 compliant minio |
+| `inline` (string) | Inline minio is embedded in the sensor spec as a string |
+| `file` (FileArtifact) | File minio is a minio stored in a file |
+| `url` (URLArtifact) | URL to fetch the minio from |
+| `configmap` (ConfigmapArtifact) | Configmap that stores the minio |
+| `git` (GitArtifact) | Git repository hosting the minio |
+| `resource` (Kubernetes meta/v1/unstructured.Unstructured) | Resource is a generic template for a K8s resource |
+
+## Backoff
+
+(Appears on: TriggerPolicy)
+
+Backoff for an operation.
+
+| Field | Description |
+| --- | --- |
+| `duration` (time.Duration) | Duration is the duration in nanoseconds |
+| `factor` (float64) | Duration is multiplied by factor each iteration |
+| `jitter` (float64) | The amount of jitter applied each iteration |
+| `steps` (int) | Exit with error after this many steps |
+
+## ConfigmapArtifact
+
+(Appears on: ArtifactLocation)
+
+ConfigmapArtifact contains information about a minio in a K8s configmap.
+
+| Field | Description |
+| --- | --- |
+| `name` (string) | Name of the configmap |
+| `namespace` (string) | Namespace where the configmap is deployed |
+| `key` (string) | Key within the configmap data which contains the trigger resource definition |
+
+## DataFilter
+
+(Appears on: EventDependencyFilter)
+
+DataFilter describes constraints and filters for event data. Regular expressions are purposefully not a feature as they are overkill for our uses here. See Rob Pike's post: https://commandcenter.blogspot.com/2011/08/regular-expressions-in-lexing-and.html
+
+| Field | Description |
+| --- | --- |
+| `path` (string) | Path is the JSONPath of the event's (JSON decoded) data key. Path is a series of keys separated by a dot. A key may contain the wildcard characters '*' and '?'. To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\'. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this |
+| `type` (JSONType) | Type contains the JSON type of the data |
+| `value` ([]string) | Value is the allowed string values for this key. Booleans are passed using strconv.ParseBool(), numbers are parsed as float64 using strconv.ParseFloat(), strings are taken as is, and nils are ignored |
+
+## DependencyGroup
+
+(Appears on: SensorSpec)
+
+DependencyGroup is the group of dependencies.
+
+| Field | Description |
+| --- | --- |
+| `name` (string) | Name of the group |
+| `dependencies` ([]string) | Dependencies of events |
+
+## EventDependency
+
+(Appears on: SensorSpec)
+
+EventDependency describes a dependency.
+
+| Field | Description |
+| --- | --- |
+| `name` (string) | Name is a unique name of this dependency |
+| `filters` (EventDependencyFilter) | Filters and rules governing tolerations of success and constraints on the context and data of an event |
+| `connected` (bool) | Connected tells if the subscription is already set up in case of the nats protocol |
+
+## EventDependencyFilter
+
+(Appears on: EventDependency)
+
+EventDependencyFilter defines filters and constraints for an event.
+
+| Field | Description |
+| --- | --- |
+| `name` (string) | Name of the event filter |
+| `time` (TimeFilter) | Time filter on the event with escalation |
+| `context` (Argo Events common.EventContext) | Context filter constraints with escalation |
+| `data` ([]DataFilter) | Data filter constraints with escalation |
+
+## FileArtifact
+
+(Appears on: ArtifactLocation)
+
+FileArtifact contains information about a minio in a filesystem.
+
+| Field | Description |
+| --- | --- |
+| `path` (string) | |
+
+## GitArtifact
+
+(Appears on: ArtifactLocation)
+
+GitArtifact contains information about a minio stored in git.
+
+| Field | Description |
+| --- | --- |
+| `url` (string) | Git URL |
+| `cloneDirectory` (string) | Directory to clone the repository into. We clone the complete directory because GitArtifact is not limited to any specific Git service provider, hence we don't use any provider-specific git client |
+| `creds` (GitCreds) | (Optional) Creds contain a reference to the git username and password |
+| `namespace` (string) | (Optional) Namespace where the creds are stored |
+| `sshKeyPath` (string) | (Optional) SSHKeyPath is the path to your SSH key. Use this if you don't want to provide a username and password. The SSH key path must be mounted in the sensor pod |
+| `filePath` (string) | Path to the file that contains the trigger resource definition |
+| `branch` (string) | (Optional) Branch to use to pull the trigger resource |
+| `tag` (string) | (Optional) Tag to use to pull the trigger resource |
+| `ref` (string) | (Optional) Ref to use to pull the trigger resource. Will result in a shallow clone and fetch |
+| `remote` (GitRemoteConfig) | (Optional) Remote to manage the set of tracked repositories. Defaults to "origin". Refer to https://git-scm.com/docs/git-remote |
+
+## GitCreds
+
+(Appears on: GitArtifact)
+
+GitCreds contain a reference to the git username and password.
+
+## GitRemoteConfig
+
+(Appears on: GitArtifact)
+
+GitRemoteConfig contains the configuration of a Git remote.
+
+| Field | Description |
+| --- | --- |
+| `name` (string) | Name of the remote to fetch from |
+| `urls` ([]string) | URLs of the remote repository. Must be non-empty. Fetch will always use the first URL, while push will use all of them |
+
+## JSONType (string alias)
+
+(Appears on: DataFilter)
+
+JSONType contains the supported JSON types for data filtering.
+
+## NodePhase (string alias)
+
+(Appears on: NodeStatus, SensorStatus)
+
+NodePhase is the label for the condition of a node.
+
+## NodeStatus
+
+(Appears on: SensorStatus)
+
+NodeStatus describes the status of an individual node in the sensor's FSM. A single node can represent the status of an event or a trigger.
+
+| Field | Description |
+| --- | --- |
+| `id` (string) | ID is a unique identifier of a node within the sensor; it is a hash of the node name |
+| `name` (string) | Name is a unique name in the node tree used to generate the node ID |
+| `displayName` (string) | DisplayName is the human-readable representation of the node |
+| `type` (NodeType) | Type is the type of the node |
+| `phase` (NodePhase) | Phase of the node |
+| `startedAt` (Kubernetes meta/v1.MicroTime) | StartedAt is the time at which this node started |
+| `completedAt` (Kubernetes meta/v1.MicroTime) | CompletedAt is the time at which this node completed |
+| `message` (string) | Store data or something to save for event notifications or trigger events |
+| `event` (Argo Events common.Event) | Event stores the last seen event for this node |
+
+## NodeType (string alias)
+
+(Appears on: NodeStatus)
+
+NodeType is the type of a node.
+
+## NotificationType (string alias)
+
+NotificationType represents a type of notification that is handled by a sensor.
+
+## Sensor
+
+Sensor is the definition of a sensor resource.
+
+| Field | Description |
+| --- | --- |
+| `metadata` (Kubernetes meta/v1.ObjectMeta) | Refer to the Kubernetes API documentation for the fields of the metadata field |
+| `spec` (SensorSpec) | Desired sensor state; its fields are listed under SensorSpec below |
+| `status` (SensorStatus) | |
+
+## SensorResources
+
+(Appears on: SensorStatus)
+
+SensorResources holds the metadata of the resources created for the sensor.
+
+| Field | Description |
+| --- | --- |
+| `deployment` (Kubernetes meta/v1.ObjectMeta) | Deployment holds the metadata of the deployment for the sensor |
+| `service` (Kubernetes meta/v1.ObjectMeta) | (Optional) Service holds the metadata of the service for the sensor |
+
+## SensorSpec
+
+(Appears on: Sensor)
+
+SensorSpec represents desired sensor state.
+
+| Field | Description |
+| --- | --- |
+| `dependencies` ([]EventDependency) | Dependencies is a list of the events that this sensor is dependent on |
+| `triggers` ([]Trigger) | Triggers is a list of the things that this sensor evokes. These are the outputs from this sensor |
+| `template` (Kubernetes core/v1.PodTemplateSpec) | Template contains the sensor pod specification. For more information, read https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#pod-v1-core |
+| `eventProtocol` (Argo Events common.EventProtocol) | EventProtocol is the protocol through which the sensor receives events from the gateway |
+| `circuit` (string) | Circuit is a boolean expression of dependency groups |
+| `dependencyGroups` ([]DependencyGroup) | DependencyGroups is a list of the groups of events |
+| `errorOnFailedRound` (bool) | ErrorOnFailedRound, if set to true, marks the sensor state as error if the previous trigger round fails. Once the sensor state is set to error, no further triggers will be processed |
+
+## SensorStatus
+
+(Appears on: Sensor)
+
+SensorStatus contains information about the status of a sensor.
+
+| Field | Description |
+| --- | --- |
+| `phase` (NodePhase) | Phase is the high-level summary of the sensor |
+| `startedAt` (Kubernetes meta/v1.Time) | StartedAt is the time at which this sensor was initiated |
+| `completedAt` (Kubernetes meta/v1.Time) | CompletedAt is the time at which this sensor completed |
+| `message` (string) | Message is a human-readable string indicating details about the sensor in its phase |
+| `nodes` (map[string]github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.NodeStatus) | Nodes is a mapping between a node ID and the node's status; it records the states for the FSM of this sensor |
+| `triggerCycleCount` (int32) | TriggerCycleCount is the count of the sensor's trigger cycle runs |
+| `triggerCycleStatus` (TriggerCycleState) | TriggerCycleStatus is the status from the last cycle of trigger execution |
+| `lastCycleTime` (Kubernetes meta/v1.Time) | LastCycleTime is the time when the last trigger cycle completed |
+| `resources` (SensorResources) | Resources refers to the metadata of the resources created for the sensor |
+
+## TimeFilter
+
+(Appears on: EventDependencyFilter)
+
+TimeFilter describes a window in time. It filters out events that occur outside the time limits. In other words, only events that occur after Start and before Stop will pass this filter.
+
+| Field | Description |
+| --- | --- |
+| `start` (string) | Start is the beginning of the time window. Before this time, events for this dependency are ignored. Format is hh:mm:ss |
+| `stop` (string) | Stop is the end of the time window. After this time, events for this dependency are ignored. Format is hh:mm:ss |
+
+## Trigger
+
+(Appears on: SensorSpec)
+
+Trigger is an action taken, output produced, an event created, a message sent.
+
+| Field | Description |
+| --- | --- |
+| `template` (TriggerTemplate) | Template describes the trigger specification |
+| `templateParameters` ([]TriggerParameter) | TemplateParameters is the list of resource parameters to pass to the template object |
+| `resourceParameters` ([]TriggerParameter) | ResourceParameters is the list of resource parameters to pass to the resolved resource object in the template object |
+| `policy` (TriggerPolicy) | Policy to configure backoff and execution criteria for the trigger |
+
+## TriggerCondition
+
+(Appears on: TriggerTemplate)
+
+TriggerCondition describes a condition which must be satisfied in order to execute a trigger. Depending upon the condition type, the status of dependency groups is used to evaluate the result.
+
+| Field | Description |
+| --- | --- |
+| `any` ([]string) | Any acts as an OR operator between dependencies |
+| `all` ([]string) | All acts as an AND operator between dependencies |
+
+## TriggerCycleState (string alias)
+
+(Appears on: SensorStatus)
+
+TriggerCycleState is the label for the state of the trigger cycle.
+
+## TriggerParameter
+
+(Appears on: Trigger)
+
+TriggerParameter indicates a passed parameter to a service template.
+
+| Field | Description |
+| --- | --- |
+| `src` (TriggerParameterSource) | Src contains a source reference to the value of the parameter from an event |
+| `dest` (string) | Dest is the JSONPath of a resource key. A path is a series of keys separated by a dot. The colon character can be escaped with '.'. The -1 key can be used to append a value to an existing array. See https://github.com/tidwall/sjson#path-syntax for more information about how this is used |
+| `operation` (TriggerParameterOperation) | Operation is what to do with the existing value at Dest, whether to 'prepend', 'overwrite', or 'append' it |
+
+## TriggerParameterOperation (string alias)
+
+(Appears on: TriggerParameter)
+
+TriggerParameterOperation represents how to set a trigger destination resource key.
+
+## TriggerParameterSource
+
+(Appears on: TriggerParameter)
+
+TriggerParameterSource defines the source for a parameter from an event.
+
+| Field | Description |
+| --- | --- |
+| `event` (string) | Event is the name of the event for which to retrieve this event |
+| `path` (string) | Path is the JSONPath of the event's (JSON decoded) data key. Path is a series of keys separated by a dot. A key may contain the wildcard characters '*' and '?'. To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\'. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this |
+| `value` (string) | Value is the default literal value to use for this parameter source. This is only used if the path is invalid. If the path is invalid and this is not defined, this param source will produce an error |
+
+## TriggerPolicy
+
+(Appears on: Trigger)
+
+TriggerPolicy dictates the policy for the trigger retries.
+
+| Field | Description |
+| --- | --- |
+| `backoff` (Backoff) | Backoff before checking resource state |
+| `state` (TriggerStateLabels) | State refers to labels used to check the resource state |
+| `errorOnBackoffTimeout` (bool) | ErrorOnBackoffTimeout determines whether the sensor should transition to error state if the backoff times out and the resource has neither transitioned into success nor failure |
+
+## TriggerStateLabels
+
+(Appears on: TriggerPolicy)
+
+TriggerStateLabels defines the labels used to decide if a resource is in success or failure state.
+
+| Field | Description |
+| --- | --- |
+| `success` (map[string]string) | Success defines labels required to identify a resource in success state |
+| `failure` (map[string]string) | Failure defines labels required to identify a resource in failed state |
+
+## TriggerTemplate
+
+(Appears on: Trigger)
+
+TriggerTemplate is the template that describes the trigger specification.
+
+| Field | Description |
+| --- | --- |
+| `name` (string) | Name is a unique name of the action to take |
+| `when` (TriggerCondition) | When is the condition to execute the trigger |
+| `GroupVersionResource` (Kubernetes meta/v1.GroupVersionResource) | (Members of GroupVersionResource are embedded into this type.) The unambiguous kind of this object, used in order to retrieve the appropriate Kubernetes API client for this resource |
+| `source` (ArtifactLocation) | Source of the K8s resource file(s) |
+
+## URLArtifact
+
+(Appears on: ArtifactLocation)
+
+URLArtifact contains information about a minio at an HTTP endpoint.
+
+| Field | Description |
+| --- | --- |
+| `path` (string) | Path is the complete URL |
+| `verifyCert` (bool) | VerifyCert decides whether the connection is secure or not |
+
+Generated with gen-crd-api-reference-docs on git commit 8d85191.
+
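Since api/sensor.md documents the full Sensor resource, a compact end-to-end example may help readers connect SensorSpec, Trigger, and TriggerTemplate. The manifest below is a minimal sketch built only from the fields documented above; the resource names, dependency name, image references, and the inline Workflow are illustrative assumptions, not a tested configuration from this repository.

```yaml
# Hypothetical minimal Sensor tying together SensorSpec, Trigger, and
# TriggerTemplate as documented in api/sensor.md.
apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: webhook-sensor                   # illustrative name
spec:
  template:                              # sensor pod spec (core/v1.PodTemplateSpec)
    spec:
      containers:
        - name: sensor
          image: argoproj/sensor:latest  # assumed image reference
  dependencies:
    - name: webhook-gateway:example      # assumed "<gateway>:<event-source>" name
  triggers:
    - template:
        name: hello-world-workflow-trigger
        group: argoproj.io               # embedded GroupVersionResource
        version: v1alpha1
        resource: workflows
        source:
          inline: |                      # ArtifactLocation: inline resource definition
            apiVersion: argoproj.io/v1alpha1
            kind: Workflow
            metadata:
              generateName: hello-world-
            spec:
              entrypoint: whalesay
              templates:
                - name: whalesay
                  container:
                    image: docker/whalesay:latest
                    command: [cowsay]
                    args: ["hello world"]
```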
diff --git a/common/common.go b/common/common.go
index 305ec1a5d9..06d166e88f 100644
--- a/common/common.go
+++ b/common/common.go
@@ -17,156 +17,106 @@ limitations under the License.
package common
import (
- "github.com/argoproj/argo-events/pkg/apis/gateway"
- "github.com/argoproj/argo-events/pkg/apis/sensor"
+ "github.com/pkg/errors"
)
+// Defaults
const (
-
// ErrorResponse for http request
ErrorResponse = "Error"
-
// StandardTimeFormat is time format reference for golang
StandardTimeFormat = "2006-01-02 15:04:05"
-
// StandardYYYYMMDDFormat formats date in yyyy-mm-dd format
StandardYYYYMMDDFormat = "2006-01-02"
-
// DefaultControllerNamespace is the default namespace where the sensor and gateways controllers are installed
DefaultControllerNamespace = "argo-events"
)
-// ENV VARS
+// Environment variables
const (
// EnvVarKubeConfig is the path to the Kubernetes configuration
EnvVarKubeConfig = "KUBE_CONFIG"
-
// EnvVarDebugLog is the env var to turn on the debug mode for logging
EnvVarDebugLog = "DEBUG_LOG"
)
-// LABELS
+// Controller environment variables
const (
- // LabelOperation is a label for an operation in framework
- LabelOperation = "operation"
-
- // LabelEventSource is label for event name
- LabelEventSource = "event-source"
+ // EnvVarControllerConfigMap contains name of the configmap to retrieve controller configuration from
+ EnvVarControllerConfigMap = "CONTROLLER_CONFIG_MAP"
+ // EnvVarControllerInstanceID is used to get controller instance id
+ EnvVarControllerInstanceID = "CONTROLLER_INSTANCE_ID"
+ // EnvVarControllerName is used to get name of the controller
+ EnvVarControllerName = "CONTROLLER_NAME"
+ // EnvVarResourceName refers to the env var for the name of the resource
+ EnvVarResourceName = "NAME"
+ // EnvVarNamespace refers to a K8s namespace
+ EnvVarNamespace = "NAMESPACE"
)
-// SENSOR CONTROLLER CONSTANTS
+// Controller labels
const (
- // env variables constants
- //LabelKeySensorControllerInstanceID is the label which allows to separate application among multiple running sensor controllers.
- LabelKeySensorControllerInstanceID = sensor.FullName + "/sensor-controller-instanceid"
-
- // LabelSensorKeyPhase is a label applied to sensors to indicate the current phase of the sensor (for filtering purposes)
- LabelSensorKeyPhase = sensor.FullName + "/phase"
-
- // LabelSensorKeyComplete is the label to mark sensors as complete
- LabelSensorKeyComplete = sensor.FullName + "/complete"
-
- // EnvVarSensorControllerConfigMap is the name of the configmap to use for the sensor-controller
- EnvVarSensorControllerConfigMap = "SENSOR_CONFIG_MAP"
-
- // labels constants
- // LabelSensorControllerName is the default deployment name of the sensor-controller
- LabelSensorControllerName = "sensor-controller"
-
- LabelArgoEventsSensorVersion = "argo-events-sensor-version"
+ // LabelResourceName is the label for the K8s resource name
+ LabelResourceName = "resource-name"
+ // LabelControllerName is the label for the controller name
+ LabelControllerName = "controller-name"
+)
- // SensorControllerConfigMapKey is the key in the configmap to retrieve sensor configuration from.
+const (
+ // ControllerConfigMapKey is the key in the configmap to retrieve controller configuration from.
// Content encoding is expected to be YAML.
- SensorControllerConfigMapKey = "config"
-
- // miscellaneous constants
- // AnnotationSensorResourceSpecHashName is the annotation of a sensor resource spec hash
- AnnotationSensorResourceSpecHashName = sensor.FullName + "/resource-spec-hash"
+ ControllerConfigMapKey = "config"
)
-// SENSOR CONSTANTS
+// Sensor constants
const (
// SensorServiceEndpoint is the endpoint to dispatch the event to
SensorServiceEndpoint = "/"
-
// SensorName refers env var for name of sensor
SensorName = "SENSOR_NAME"
-
// SensorNamespace is used to get namespace where sensors are deployed
SensorNamespace = "SENSOR_NAMESPACE"
-
// LabelSensorName is label for sensor name
LabelSensorName = "sensor-name"
-
- // EnvVarSensorControllerInstanceID is used to get sensor controller instance id
- EnvVarSensorControllerInstanceID = "SENSOR_CONTROLLER_INSTANCE_ID"
-)
-
-// GATEWAY CONTROLLER CONSTANTS
-const (
- // env variables
- // EnvVarGatewayControllerConfigMap contains name of the configmap to retrieve gateway-controller configuration from
- EnvVarGatewayControllerConfigMap = "GATEWAY_CONTROLLER_CONFIG_MAP"
-
- // EnvVarGatewayControllerInstanceID is used to get gateway controller instance id
- EnvVarGatewayControllerInstanceID = "GATEWAY_CONTROLLER_INSTANCE_ID"
-
- // EnvVarGatewayControllerName is used to get name of gateway controller
- EnvVarGatewayControllerName = "GATEWAY_CONTROLLER_NAME"
-
- // EnvVarGatewayName refers env var for name of gateway
- EnvVarGatewayName = "GATEWAY_NAME"
-
- // EnvVarGatewayNamespace is namespace where gateway controller is deployed
- EnvVarGatewayNamespace = "GATEWAY_NAMESPACE"
-
- // labels
- // LabelGatewayControllerName is the default deployment name of the gateway-controller-controller
- LabelGatewayControllerName = "gateway-controller"
-
- //LabelKeyGatewayControllerInstanceID is the label which allows to separate application among multiple running gateway-controller controllers.
- LabelKeyGatewayControllerInstanceID = gateway.FullName + "/gateway-controller-instanceid"
-
- // LabelGatewayKeyPhase is a label applied to gateways to indicate the current phase of the gateway-controller (for filtering purposes)
- LabelGatewayKeyPhase = gateway.FullName + "/phase"
-
- // LabelGatewayName is the label for gateway name
- LabelGatewayName = "gateway-name"
-
- // LabelArgoEventsGatewayVersion is the label for the gateway version
- LabelArgoEventsGatewayVersion = "argo-events-gateway-version"
-
- // AnnotationGatewayResourceSpecHashName is the annotation of a gateway resource spec hash
- AnnotationGatewayResourceSpecHashName = gateway.FullName + "/resource-spec-hash"
-
- // GatewayControllerConfigMapKey is the key in the configmap to retrieve gateway-controller configuration from.
- // Content encoding is expected to be YAML.
- GatewayControllerConfigMapKey = "config"
)
-// GATEWAY CONSTANTS
+// Gateway constants
const (
- // LabelGatewayEventSourceName is the label for a event source in gateway
- LabelGatewayEventSourceName = "event-source-name"
-
- // LabelGatewayEventSourceID is the label for gateway configuration ID
- LabelGatewayEventSourceID = "event-source-id"
-
- // LabelArgoEventsEventSourceVersion is the label for event source version
- LabelArgoEventsEventSourceVersion = "argo-events-event-source-version"
-
- // EnvVarGatewayEventSourceConfigMap is used to get map containing event sources to run in a gateway
- EnvVarGatewayEventSourceConfigMap = "GATEWAY_EVENT_SOURCE_CONFIG_MAP"
-
+ // LabelEventSourceName is the label for a event source in gateway
+ // LabelEventSourceName is the label for an event source in gateway
+ // LabelEventSourceID is the label for gateway configuration ID
+ LabelEventSourceID = "event-source-id"
EnvVarGatewayServerPort = "GATEWAY_SERVER_PORT"
-
// Server Connection Timeout, 10 seconds
ServerConnTimeout = 10
)
+const (
+ // EnvVarEventSource refers to event source name
+ EnvVarEventSource = "EVENT_SOURCE"
+ // AnnotationResourceSpecHash is the annotation of a K8s resource spec hash
+ AnnotationResourceSpecHash = "resource-spec-hash"
+)
+
// CloudEvents constants
const (
 // CloudEventsVersion is the version of the CloudEvents spec targeted
// by this library.
CloudEventsVersion = "0.1"
)
+
+var (
+ ErrNilEventSource = errors.New("event source can't be nil")
+)
+
+// Miscellaneous Labels
+const (
+ // LabelOperation is a label for an operation in framework
+ LabelOperation = "operation"
+ // LabelEventSource is label for event name
+ LabelEventSource = "event-source"
+ // LabelOwnerName is the label for resource owner name
+ LabelOwnerName = "owner-name"
+ // LabelObjectName is the label for object name
+ LabelObjectName = "object-name"
+)
diff --git a/common/logger.go b/common/logger.go
index 3736e2314e..7666c8877e 100644
--- a/common/logger.go
+++ b/common/logger.go
@@ -23,21 +23,22 @@ import (
// Logger constants
const (
- LabelNamespace = "namespace"
- LabelPhase = "phase"
- LabelInstanceID = "instance-id"
- LabelPodName = "pod-name"
- LabelServiceName = "svc-name"
- LabelEndpoint = "endpoint"
- LabelPort = "port"
- LabelURL = "url"
- LabelNodeName = "node-name"
- LabelNodeType = "node-type"
- LabelHTTPMethod = "http-method"
- LabelClientID = "client-id"
- LabelVersion = "version"
- LabelTime = "time"
- LabelTriggerName = "trigger-name"
+ LabelNamespace = "namespace"
+ LabelPhase = "phase"
+ LabelInstanceID = "instance-id"
+ LabelPodName = "pod-name"
+ LabelDeploymentName = "deployment-name"
+ LabelServiceName = "svc-name"
+ LabelEndpoint = "endpoint"
+ LabelPort = "port"
+ LabelURL = "url"
+ LabelNodeName = "node-name"
+ LabelNodeType = "node-type"
+ LabelHTTPMethod = "http-method"
+ LabelClientID = "client-id"
+ LabelVersion = "version"
+ LabelTime = "time"
+ LabelTriggerName = "trigger-name"
)
// NewArgoEventsLogger returns a new ArgoEventsLogger
diff --git a/common/util.go b/common/util.go
index f39d292331..9a2c89b648 100644
--- a/common/util.go
+++ b/common/util.go
@@ -21,8 +21,8 @@ import (
"fmt"
"hash/fnv"
"net/http"
+ "strings"
- corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
@@ -35,14 +35,9 @@ func DefaultConfigMapName(controllerName string) string {
return fmt.Sprintf("%s-configmap", controllerName)
}
-// DefaultServiceName returns a formulated name for a service
-func DefaultServiceName(serviceName string) string {
- return fmt.Sprintf("%s-svc", serviceName)
-}
-
// ServiceDNSName returns a formulated dns name for a service
func ServiceDNSName(serviceName, namespace string) string {
- return fmt.Sprintf("%s-svc.%s.svc.cluster.local", serviceName, namespace)
+ return fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, namespace)
}
// DefaultEventSourceName returns a formulated name for a gateway configuration
@@ -111,12 +106,19 @@ func GetObjectHash(obj metav1.Object) (string, error) {
return Hasher(string(b)), nil
}
-func CheckEventSourceVersion(cm *corev1.ConfigMap) error {
- if cm.Labels == nil {
- return fmt.Errorf("labels can't be empty. event source must be specified in as %s label", LabelArgoEventsEventSourceVersion)
+// FormatEndpoint returns a formatted api endpoint
+func FormatEndpoint(endpoint string) string {
+ if !strings.HasPrefix(endpoint, "/") {
+ return fmt.Sprintf("/%s", endpoint)
}
- if _, ok := cm.Labels[LabelArgoEventsEventSourceVersion]; !ok {
- return fmt.Errorf("event source must be specified in as %s label", LabelArgoEventsEventSourceVersion)
- }
- return nil
+ return endpoint
+}
+
+// FormattedURL returns a formatted url
+func FormattedURL(url, endpoint string) string {
+ return fmt.Sprintf("%s%s", url, FormatEndpoint(endpoint))
+}
+
+// ErrEventSourceTypeMismatch returns an error message for a mismatched event source type
+func ErrEventSourceTypeMismatch(eventSourceType string) string {
+ return fmt.Sprintf("event source is not type of %s", eventSourceType)
}
diff --git a/common/util_test.go b/common/util_test.go
index 7d3592b875..aded244489 100644
--- a/common/util_test.go
+++ b/common/util_test.go
@@ -17,7 +17,6 @@ limitations under the License.
package common
import (
- "fmt"
"github.com/smartystreets/goconvey/convey"
"net/http"
"testing"
@@ -66,12 +65,6 @@ func TestDefaultConfigMapName(t *testing.T) {
assert.Equal(t, "sensor-controller-configmap", res)
}
-func TestDefaultServiceName(t *testing.T) {
- convey.Convey("Given a service, get the default name", t, func() {
- convey.So(DefaultServiceName("default"), convey.ShouldEqual, fmt.Sprintf("%s-svc", "default"))
- })
-}
-
func TestDefaultNatsQueueName(t *testing.T) {
convey.Convey("Given a nats queue, get the default name", t, func() {
convey.So(DefaultNatsQueueName("default"), convey.ShouldEqual, "default-queue")
@@ -114,3 +107,15 @@ func TestServerResourceForGroupVersionKind(t *testing.T) {
})
})
}
+
+func TestFormatWebhookEndpoint(t *testing.T) {
+ convey.Convey("Given a webhook endpoint, format it", t, func() {
+ convey.So(FormatEndpoint("hello"), convey.ShouldEqual, "/hello")
+ })
+}
+
+func TestGenerateFormattedURL(t *testing.T) {
+ convey.Convey("Given a webhook, generate formatted URL", t, func() {
+ convey.So(FormattedURL("test-url", "fake"), convey.ShouldEqual, "test-url/fake")
+ })
+}
diff --git a/controllers/common/informer.go b/controllers/common/informer.go
deleted file mode 100644
index 73d7de5a65..0000000000
--- a/controllers/common/informer.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package common
-
-import (
- "fmt"
-
- "k8s.io/apimachinery/pkg/api/meta"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/client-go/informers"
- informersv1 "k8s.io/client-go/informers/core/v1"
- "k8s.io/client-go/tools/cache"
- "k8s.io/client-go/util/workqueue"
-)
-
-// ArgoEventInformerFactory holds values to create SharedInformerFactory of argo-events
-type ArgoEventInformerFactory struct {
- OwnerGroupVersionKind schema.GroupVersionKind
- OwnerInformer cache.SharedIndexInformer
- informers.SharedInformerFactory
- Queue workqueue.RateLimitingInterface
-}
-
-// NewPodInformer returns a PodInformer of argo-events
-func (c *ArgoEventInformerFactory) NewPodInformer() informersv1.PodInformer {
- podInformer := c.SharedInformerFactory.Core().V1().Pods()
- podInformer.Informer().AddEventHandler(
- cache.ResourceEventHandlerFuncs{
- DeleteFunc: func(obj interface{}) {
- owner, err := c.getOwner(obj)
- if err != nil {
- return
- }
- key, err := cache.MetaNamespaceKeyFunc(owner)
- if err == nil {
- c.Queue.Add(key)
- }
- },
- },
- )
- return podInformer
-}
-
-// NewServiceInformer returns a ServiceInformer of argo-events
-func (c *ArgoEventInformerFactory) NewServiceInformer() informersv1.ServiceInformer {
- svcInformer := c.SharedInformerFactory.Core().V1().Services()
- svcInformer.Informer().AddEventHandler(
- cache.ResourceEventHandlerFuncs{
- DeleteFunc: func(obj interface{}) {
- owner, err := c.getOwner(obj)
- if err != nil {
- return
- }
- key, err := cache.MetaNamespaceKeyFunc(owner)
- if err == nil {
- c.Queue.Add(key)
- }
- },
- },
- )
- return svcInformer
-}
-
-func (c *ArgoEventInformerFactory) getOwner(obj interface{}) (interface{}, error) {
- m, err := meta.Accessor(obj)
- if err != nil {
- return nil, err
- }
- for _, owner := range m.GetOwnerReferences() {
-
- if owner.APIVersion == c.OwnerGroupVersionKind.GroupVersion().String() &&
- owner.Kind == c.OwnerGroupVersionKind.Kind {
- key := owner.Name
- if len(m.GetNamespace()) > 0 {
- key = m.GetNamespace() + "/" + key
- }
- obj, exists, err := c.OwnerInformer.GetIndexer().GetByKey(key)
- if err != nil {
- return nil, err
- }
- if !exists {
- return nil, fmt.Errorf("failed to get object from cache")
- }
- return obj, nil
- }
- }
- return nil, fmt.Errorf("no owner found")
-}
diff --git a/controllers/common/informer_test.go b/controllers/common/informer_test.go
deleted file mode 100644
index 7b7f177b02..0000000000
--- a/controllers/common/informer_test.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package common
-
-import (
- "fmt"
- "math/rand"
- "testing"
- "time"
-
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/client-go/informers"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/kubernetes/fake"
- "k8s.io/client-go/tools/cache"
- "k8s.io/client-go/util/workqueue"
-)
-
-func getFakePodSharedIndexInformer(clientset kubernetes.Interface) cache.SharedIndexInformer {
- // NewListWatchFromClient doesn't work with fake client.
- // ref: https://github.com/kubernetes/client-go/issues/352
- return cache.NewSharedIndexInformer(&cache.ListWatch{
- ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
- return clientset.CoreV1().Pods("").List(options)
- },
- WatchFunc: clientset.CoreV1().Pods("").Watch,
- }, &corev1.Pod{}, 1*time.Second, cache.Indexers{})
-}
-
-func getInformerFactory(clientset kubernetes.Interface, queue workqueue.RateLimitingInterface, done chan struct{}) *ArgoEventInformerFactory {
- informerFactory := informers.NewSharedInformerFactory(clientset, 0)
- ownerInformer := getFakePodSharedIndexInformer(clientset)
- go ownerInformer.Run(done)
- return &ArgoEventInformerFactory{
- OwnerGroupVersionKind: schema.GroupVersionKind{Version: "v1", Kind: "Pod"},
- OwnerInformer: ownerInformer,
- SharedInformerFactory: informerFactory,
- Queue: queue,
- }
-}
-
-func getCommonPodSpec() corev1.PodSpec {
- return corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Name: "whalesay",
- Image: "docker/whalesay:latest",
- },
- },
- }
-}
-
-func getPod(owner *corev1.Pod) *corev1.Pod {
- var ownerReferneces = []metav1.OwnerReference{}
- if owner != nil {
- ownerReferneces = append(ownerReferneces, *metav1.NewControllerRef(owner, owner.GroupVersionKind()))
- }
- return &corev1.Pod{
- TypeMeta: metav1.TypeMeta{
- Kind: "Pod",
- APIVersion: "v1",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("pod-%d", rand.Uint32()),
- OwnerReferences: ownerReferneces,
- },
- Spec: getCommonPodSpec(),
- }
-}
-
-func getService(owner *corev1.Pod) *corev1.Service {
- var ownerReferneces = []metav1.OwnerReference{}
- if owner != nil {
- ownerReferneces = append(ownerReferneces, *metav1.NewControllerRef(owner, owner.GroupVersionKind()))
- }
- return &corev1.Service{
- TypeMeta: metav1.TypeMeta{
- Kind: "Service",
- APIVersion: "v1",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("svc-%d", rand.Uint32()),
- OwnerReferences: ownerReferneces,
- },
- Spec: corev1.ServiceSpec{},
- }
-}
-
-func TestNewPodInformer(t *testing.T) {
- done := make(chan struct{})
- queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
- defer queue.ShutDown()
- namespace := "namespace"
- clientset := fake.NewSimpleClientset()
- factory := getInformerFactory(clientset, queue, done)
- convey.Convey("Given an informer factory", t, func() {
- convey.Convey("Get a new gateway pod informer and make sure its not nil", func() {
- podInformer := factory.NewPodInformer()
- convey.So(podInformer, convey.ShouldNotBeNil)
-
- convey.Convey("Handle event", func() {
- go podInformer.Informer().Run(done)
- ownerPod := getPod(nil)
- ownerPod, err := clientset.CoreV1().Pods(namespace).Create(ownerPod)
- convey.So(err, convey.ShouldBeNil)
- cache.WaitForCacheSync(done, factory.OwnerInformer.HasSynced)
-
- convey.Convey("Not enqueue owner key on creation", func() {
- pod := getPod(ownerPod)
- pod, err := clientset.CoreV1().Pods(namespace).Create(pod)
- convey.So(err, convey.ShouldBeNil)
- cache.WaitForCacheSync(done, podInformer.Informer().HasSynced)
-
- convey.So(queue.Len(), convey.ShouldEqual, 0)
-
- convey.Convey("Not enqueue owner key on update", func() {
- pod.Labels = map[string]string{"foo": "bar"}
- _, err = clientset.CoreV1().Pods(namespace).Update(pod)
- convey.So(err, convey.ShouldBeNil)
- cache.WaitForCacheSync(done, podInformer.Informer().HasSynced)
-
- convey.So(queue.Len(), convey.ShouldEqual, 0)
- })
-
- convey.Convey("Enqueue owner key on deletion", func() {
- err := clientset.CoreV1().Pods(namespace).Delete(pod.Name, &metav1.DeleteOptions{})
- convey.So(err, convey.ShouldBeNil)
- cache.WaitForCacheSync(done, podInformer.Informer().HasSynced)
-
- convey.So(queue.Len(), convey.ShouldEqual, 1)
- key, _ := queue.Get()
- queue.Done(key)
- convey.So(key, convey.ShouldEqual, fmt.Sprintf("%s/%s", namespace, ownerPod.Name))
- })
- })
- })
- })
- })
-}
-
-func TestNewServiceInformer(t *testing.T) {
- done := make(chan struct{})
- queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
- defer queue.ShutDown()
- namespace := "namespace"
- clientset := fake.NewSimpleClientset()
- factory := getInformerFactory(clientset, queue, done)
- convey.Convey("Given an informer factory", t, func() {
- convey.Convey("Get a new gateway service informer and make sure its not nil", func() {
- svcInformer := factory.NewServiceInformer()
- convey.So(svcInformer, convey.ShouldNotBeNil)
-
- convey.Convey("Handle event", func() {
- go svcInformer.Informer().Run(done)
- ownerPod := getPod(nil)
- ownerPod, err := clientset.CoreV1().Pods(namespace).Create(ownerPod)
- convey.So(err, convey.ShouldBeNil)
- cache.WaitForCacheSync(done, factory.OwnerInformer.HasSynced)
-
- convey.Convey("Not enqueue owner key on creation", func() {
- service := getService(ownerPod)
- service, err := clientset.CoreV1().Services(namespace).Create(service)
- convey.So(err, convey.ShouldBeNil)
- cache.WaitForCacheSync(done, svcInformer.Informer().HasSynced)
- convey.So(queue.Len(), convey.ShouldEqual, 0)
-
- convey.Convey("Not enqueue owner key on update", func() {
- service.Labels = map[string]string{"foo": "bar"}
- service, err = clientset.CoreV1().Services(namespace).Update(service)
- convey.So(err, convey.ShouldBeNil)
- cache.WaitForCacheSync(done, svcInformer.Informer().HasSynced)
- convey.So(queue.Len(), convey.ShouldEqual, 0)
- })
-
- convey.Convey("Enqueue owner key on deletion", func() {
- err := clientset.CoreV1().Services(namespace).Delete(service.Name, &metav1.DeleteOptions{})
- convey.So(err, convey.ShouldBeNil)
- cache.WaitForCacheSync(done, svcInformer.Informer().HasSynced)
-
- convey.So(queue.Len(), convey.ShouldEqual, 1)
- key, _ := queue.Get()
- queue.Done(key)
- convey.So(key, convey.ShouldEqual, fmt.Sprintf("%s/%s", namespace, ownerPod.Name))
- })
- })
- })
- })
- })
-}
diff --git a/controllers/common/util.go b/controllers/common/util.go
index 112335890c..a7ad13462a 100644
--- a/controllers/common/util.go
+++ b/controllers/common/util.go
@@ -1,39 +1,50 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package common
import (
"github.com/argoproj/argo-events/common"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/selection"
)
-// ChildResourceContext holds necessary information for child resource setup
-type ChildResourceContext struct {
- SchemaGroupVersionKind schema.GroupVersionKind
- LabelOwnerName string
- LabelKeyOwnerControllerInstanceID string
- AnnotationOwnerResourceHashName string
- InstanceID string
-}
-
// SetObjectMeta sets ObjectMeta of child resource
-func (ctx *ChildResourceContext) SetObjectMeta(owner, obj metav1.Object) error {
+func SetObjectMeta(owner, obj metav1.Object, gvk schema.GroupVersionKind) error {
references := obj.GetOwnerReferences()
references = append(references,
- *metav1.NewControllerRef(owner, ctx.SchemaGroupVersionKind),
+ *metav1.NewControllerRef(owner, gvk),
)
obj.SetOwnerReferences(references)
if obj.GetName() == "" && obj.GetGenerateName() == "" {
- obj.SetGenerateName(owner.GetName())
+ obj.SetName(owner.GetName())
+ }
+ if obj.GetNamespace() == "" {
+ obj.SetNamespace(owner.GetNamespace())
}
- labels := obj.GetLabels()
- if labels == nil {
- labels = make(map[string]string)
+ objLabels := obj.GetLabels()
+ if objLabels == nil {
+ objLabels = make(map[string]string)
}
- labels[ctx.LabelOwnerName] = owner.GetName()
- labels[ctx.LabelKeyOwnerControllerInstanceID] = ctx.InstanceID
- obj.SetLabels(labels)
+ objLabels[common.LabelOwnerName] = owner.GetName()
+ obj.SetLabels(objLabels)
hash, err := common.GetObjectHash(obj)
if err != nil {
@@ -43,8 +54,17 @@ func (ctx *ChildResourceContext) SetObjectMeta(owner, obj metav1.Object) error {
if annotations == nil {
annotations = make(map[string]string)
}
- annotations[ctx.AnnotationOwnerResourceHashName] = hash
+ annotations[common.AnnotationResourceSpecHash] = hash
obj.SetAnnotations(annotations)
return nil
}
+
+// OwnerLabelSelector returns label selector for a K8s resource by it's owner
+func OwnerLabelSelector(ownerName string) (labels.Selector, error) {
+ req, err := labels.NewRequirement(common.LabelResourceName, selection.Equals, []string{ownerName})
+ if err != nil {
+ return nil, err
+ }
+ return labels.NewSelector().Add(*req), nil
+}
diff --git a/controllers/common/util_test.go b/controllers/common/util_test.go
index 012e07eb9c..1f7526c22e 100644
--- a/controllers/common/util_test.go
+++ b/controllers/common/util_test.go
@@ -1,39 +1,49 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package common
import (
"testing"
- "github.com/smartystreets/goconvey/convey"
+ "github.com/argoproj/argo-events/common"
+ "github.com/stretchr/testify/assert"
+ appv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
)
func TestSetObjectMeta(t *testing.T) {
- convey.Convey("Given an object, set meta", t, func() {
- groupVersionKind := schema.GroupVersionKind{
- Group: "grp",
- Version: "ver",
- Kind: "kind",
- }
- ctx := ChildResourceContext{
- SchemaGroupVersionKind: groupVersionKind,
- LabelOwnerName: "foo",
- LabelKeyOwnerControllerInstanceID: "id",
- AnnotationOwnerResourceHashName: "hash",
- InstanceID: "ID",
- }
- owner := corev1.Pod{}
- pod := corev1.Pod{}
- ref := metav1.NewControllerRef(&owner, groupVersionKind)
-
- err := ctx.SetObjectMeta(&owner, &pod)
- convey.So(err, convey.ShouldBeEmpty)
- convey.So(pod.Labels["foo"], convey.ShouldEqual, "")
- convey.So(pod.Labels["id"], convey.ShouldEqual, "ID")
- convey.So(pod.Annotations, convey.ShouldContainKey, "hash")
- convey.So(pod.Name, convey.ShouldEqual, "")
- convey.So(pod.GenerateName, convey.ShouldEqual, "")
- convey.So(pod.OwnerReferences, convey.ShouldContain, *ref)
- })
+ owner := appv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fake-deployment",
+ Namespace: "fake-namespace",
+ },
+ }
+ pod := corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fake-pod",
+ },
+ }
+
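+ // SetObjectMeta should add the owner reference, default the namespace, and set the owner label and spec-hash annotation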
+ err := SetObjectMeta(&owner, &pod, owner.GroupVersionKind())
+ assert.Nil(t, err)
+ assert.Equal(t, "fake-namespace", pod.Namespace)
+ assert.Equal(t, owner.GroupVersionKind().Kind, pod.OwnerReferences[0].Kind)
+ assert.NotEmpty(t, pod.Annotations[common.AnnotationResourceSpecHash])
+ assert.NotEmpty(t, pod.Labels)
+ assert.Equal(t, owner.Name, pod.Labels[common.LabelOwnerName])
}
diff --git a/cmd/controllers/gateway/main.go b/controllers/gateway/cmd/main.go
similarity index 82%
rename from cmd/controllers/gateway/main.go
rename to controllers/gateway/cmd/main.go
index c489d84054..be79cdd645 100644
--- a/cmd/controllers/gateway/main.go
+++ b/controllers/gateway/cmd/main.go
@@ -33,12 +33,12 @@ func main() {
}
// gateway-controller configuration
- configMap, ok := os.LookupEnv(common.EnvVarGatewayControllerConfigMap)
+ configMap, ok := os.LookupEnv(common.EnvVarControllerConfigMap)
if !ok {
- configMap = common.DefaultConfigMapName(common.LabelGatewayControllerName)
+ panic("controller configmap is not provided")
}
- namespace, ok := os.LookupEnv(common.EnvVarGatewayNamespace)
+ namespace, ok := os.LookupEnv(common.EnvVarNamespace)
if !ok {
namespace = common.DefaultControllerNamespace
}
@@ -51,6 +51,6 @@ func main() {
panic(err)
}
- go controller.Run(context.Background(), 1, 1)
+ go controller.Run(context.Background(), 1)
select {}
}
diff --git a/gateways/community/gcp-pubsub/config_test.go b/controllers/gateway/common.go
similarity index 54%
rename from gateways/community/gcp-pubsub/config_test.go
rename to controllers/gateway/common.go
index f816c79722..a8d22c43ec 100644
--- a/gateways/community/gcp-pubsub/config_test.go
+++ b/controllers/gateway/common.go
@@ -14,24 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package pubsub
+package gateway
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-var es = `
-projectID: "1234"
-topic: "test"
-`
+import "github.com/argoproj/argo-events/pkg/apis/gateway"
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a gcp-pubsub event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*pubSubEventSource)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
+// Labels
+const (
+ // LabelControllerInstanceID is the label used to separate gateways among multiple running gateway controllers.
+ LabelControllerInstanceID = gateway.FullName + "/gateway-controller-instanceid"
+ // LabelPhase is a label applied to gateways to indicate their current phase (for filtering purposes)
+ LabelPhase = gateway.FullName + "/phase"
+)
diff --git a/controllers/gateway/config.go b/controllers/gateway/config.go
index 9dddaf0518..6d32e15398 100644
--- a/controllers/gateway/config.go
+++ b/controllers/gateway/config.go
@@ -22,6 +22,7 @@ import (
"github.com/argoproj/argo-events/common"
"github.com/ghodss/yaml"
+ "github.com/pkg/errors"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@@ -31,8 +32,8 @@ import (
)
// watches configuration for gateway controller
-func (c *GatewayController) watchControllerConfigMap(ctx context.Context) (cache.Controller, error) {
- c.log.Info("watching gateway-controller config map updates")
+func (c *Controller) watchControllerConfigMap(ctx context.Context) (cache.Controller, error) {
+ c.logger.Infoln("watching gateway-controller config map updates")
source := c.newControllerConfigMapWatch()
_, controller := cache.NewInformer(
source,
@@ -41,19 +42,19 @@ func (c *GatewayController) watchControllerConfigMap(ctx context.Context) (cache
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if cm, ok := obj.(*apiv1.ConfigMap); ok {
- c.log.Info("detected EventSource update. updating the gateway-controller config.")
+ c.logger.Info("detected new gateway controller configmap")
err := c.updateConfig(cm)
if err != nil {
- c.log.Error("update of config failed", "err", err)
+ c.logger.WithError(err).Errorln("update of gateway controller configuration failed")
}
}
},
UpdateFunc: func(old, new interface{}) {
if newCm, ok := new.(*apiv1.ConfigMap); ok {
- c.log.Info("detected EventSource update. updating the gateway-controller config.")
+ c.logger.Infoln("detected gateway controller configmap update.")
err := c.updateConfig(newCm)
if err != nil {
- c.log.WithError(err).Error("update of config failed")
+ c.logger.WithError(err).Errorln("update of gateway controller configuration failed")
}
}
},
@@ -64,8 +65,8 @@ func (c *GatewayController) watchControllerConfigMap(ctx context.Context) (cache
}
// creates a new config map watcher
-func (c *GatewayController) newControllerConfigMapWatch() *cache.ListWatch {
- x := c.kubeClientset.CoreV1().RESTClient()
+func (c *Controller) newControllerConfigMapWatch() *cache.ListWatch {
+ x := c.k8sClient.CoreV1().RESTClient()
resource := "configmaps"
name := c.ConfigMap
fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", name))
@@ -91,8 +92,8 @@ func (c *GatewayController) newControllerConfigMapWatch() *cache.ListWatch {
}
// ResyncConfig reloads the gateway-controller config from the configmap
-func (c *GatewayController) ResyncConfig(namespace string) error {
- cmClient := c.kubeClientset.CoreV1().ConfigMaps(namespace)
+func (c *Controller) ResyncConfig(namespace string) error {
+ cmClient := c.k8sClient.CoreV1().ConfigMaps(namespace)
cm, err := cmClient.Get(c.ConfigMap, metav1.GetOptions{})
if err != nil {
return err
@@ -101,12 +102,12 @@ func (c *GatewayController) ResyncConfig(namespace string) error {
}
// updates the gateway controller configuration
-func (c *GatewayController) updateConfig(cm *apiv1.ConfigMap) error {
- configStr, ok := cm.Data[common.GatewayControllerConfigMapKey]
+func (c *Controller) updateConfig(cm *apiv1.ConfigMap) error {
+ configStr, ok := cm.Data[common.ControllerConfigMapKey]
if !ok {
- return fmt.Errorf("configMap '%s' does not have key '%s'", c.ConfigMap, common.GatewayControllerConfigMapKey)
+ return errors.Errorf("configMap '%s' does not have key '%s'", c.ConfigMap, common.ControllerConfigMapKey)
}
- var config GatewayControllerConfig
+ var config ControllerConfig
err := yaml.Unmarshal([]byte(configStr), &config)
if err != nil {
return err
diff --git a/controllers/gateway/config_test.go b/controllers/gateway/config_test.go
index 508e7d2e2c..5257e7bd74 100644
--- a/controllers/gateway/config_test.go
+++ b/controllers/gateway/config_test.go
@@ -20,7 +20,7 @@ import (
"testing"
"github.com/argoproj/argo-events/common"
- "github.com/smartystreets/goconvey/convey"
+ "github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -29,47 +29,24 @@ var (
configmapName = common.DefaultConfigMapName("gateway-controller")
)
-func TestGatewayControllerConfigWatch(t *testing.T) {
- gc := getGatewayController()
-
- convey.Convey("Given a gateway", t, func() {
- convey.Convey("Create a new watch and make sure watcher is not nil", func() {
- watcher := gc.newControllerConfigMapWatch()
- convey.So(watcher, convey.ShouldNotBeNil)
- })
- })
-
- convey.Convey("Given a gateway, resync config", t, func() {
- convey.Convey("Update a gateway configmap with new instance id and remove namespace", func() {
- cmObj := &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: common.DefaultControllerNamespace,
- Name: gc.ConfigMap,
- },
- Data: map[string]string{
- common.GatewayControllerConfigMapKey: `instanceID: fake-instance-id`,
- },
- }
- cm, err := gc.kubeClientset.CoreV1().ConfigMaps(gc.Namespace).Create(cmObj)
- convey.Convey("Make sure no error occurs", func() {
- convey.So(err, convey.ShouldBeNil)
-
- convey.Convey("Updated gateway configmap must be non-nil", func() {
- convey.So(cm, convey.ShouldNotBeNil)
-
- convey.Convey("Resync the gateway configuration", func() {
- err := gc.ResyncConfig(cmObj.Namespace)
- convey.Convey("No error should occur while resyncing gateway configuration", func() {
- convey.So(err, convey.ShouldBeNil)
-
- convey.Convey("The updated instance id must be fake-instance-id", func() {
- convey.So(gc.Config.InstanceID, convey.ShouldEqual, "fake-instance-id")
- convey.So(gc.Config.Namespace, convey.ShouldBeEmpty)
- })
- })
- })
- })
- })
- })
- })
+func TestController_ResyncConfig(t *testing.T) {
+ controller := newController()
+
+ cmObj := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: common.DefaultControllerNamespace,
+ Name: controller.ConfigMap,
+ },
+ Data: map[string]string{
+ common.ControllerConfigMapKey: `instanceID: fake-instance-id`,
+ },
+ }
+
+ cm, err := controller.k8sClient.CoreV1().ConfigMaps(controller.Namespace).Create(cmObj)
+ assert.Nil(t, err)
+ assert.NotNil(t, cm)
+ err = controller.ResyncConfig(common.DefaultControllerNamespace)
+ assert.Nil(t, err)
+ assert.Equal(t, controller.Config.Namespace, "")
+ assert.Equal(t, controller.Config.InstanceID, "fake-instance-id")
}
diff --git a/controllers/gateway/controller.go b/controllers/gateway/controller.go
index 6177283158..c5f88662a4 100644
--- a/controllers/gateway/controller.go
+++ b/controllers/gateway/controller.go
@@ -20,18 +20,14 @@ import (
"context"
"errors"
"fmt"
- "github.com/sirupsen/logrus"
"time"
base "github.com/argoproj/argo-events"
"github.com/argoproj/argo-events/common"
- ccommon "github.com/argoproj/argo-events/controllers/common"
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
clientset "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
+ "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/client-go/informers"
informersv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@@ -40,37 +36,35 @@ import (
)
const (
- gatewayResyncPeriod = 20 * time.Minute
- gatewayResourceResyncPeriod = 30 * time.Minute
- rateLimiterBaseDelay = 5 * time.Second
- rateLimiterMaxDelay = 1000 * time.Second
+ gatewayResyncPeriod = 20 * time.Minute
+ rateLimiterBaseDelay = 5 * time.Second
+ rateLimiterMaxDelay = 1000 * time.Second
)
-// GatewayControllerConfig contain the configuration settings for the gateway-controller
-type GatewayControllerConfig struct {
- // InstanceID is a label selector to limit the gateway-controller's watch of gateway jobs to a specific instance.
+// ControllerConfig contain the configuration settings for the controller
+type ControllerConfig struct {
+ // InstanceID is a label selector to limit the controller's watch of gateways to a specific instance.
InstanceID string
-
- // Namespace is a label selector filter to limit gateway-controller-controller's watch to specific namespace
+ // Namespace is a label selector filter to limit the controller's watch to a specific namespace
Namespace string
}
-// GatewayController listens for new gateways and hands off handling of each gateway-controller on the queue to the operator
-type GatewayController struct {
- // EventSource is the name of the config map in which to derive configuration of the contoller
+// Controller listens for new gateways and hands off handling of each gateway on the queue to the operator
+type Controller struct {
+ // ConfigMap is the name of the config map from which to derive the controller configuration
ConfigMap string
- // Namespace for gateway controller
+ // Namespace for controller
Namespace string
- // Config is the gateway-controller gateway-controller-controller's configuration
- Config GatewayControllerConfig
- // log is the logger for a gateway
- log *logrus.Logger
-
- // kubernetes config and apis
- kubeConfig *rest.Config
- kubeClientset kubernetes.Interface
- gatewayClientset clientset.Interface
-
+ // Config is the controller's configuration
+ Config ControllerConfig
+ // logger for the controller
+ logger *logrus.Logger
+ // K8s rest config
+ kubeConfig *rest.Config
+ // k8sClient is Kubernetes client
+ k8sClient kubernetes.Interface
+ // gatewayClient is the Argo-Events gateway resource client
+ gatewayClient clientset.Interface
// gateway-controller informer and queue
podInformer informersv1.PodInformer
svcInformer informersv1.ServiceInformer
@@ -78,21 +72,22 @@ type GatewayController struct {
queue workqueue.RateLimitingInterface
}
-// NewGatewayController creates a new Controller
-func NewGatewayController(rest *rest.Config, configMap, namespace string) *GatewayController {
+// NewGatewayController creates a new controller
+func NewGatewayController(rest *rest.Config, configMap, namespace string) *Controller {
rateLimiter := workqueue.NewItemExponentialFailureRateLimiter(rateLimiterBaseDelay, rateLimiterMaxDelay)
- return &GatewayController{
- ConfigMap: configMap,
- Namespace: namespace,
- kubeConfig: rest,
- log: common.NewArgoEventsLogger(),
- kubeClientset: kubernetes.NewForConfigOrDie(rest),
- gatewayClientset: clientset.NewForConfigOrDie(rest),
- queue: workqueue.NewRateLimitingQueue(rateLimiter),
+ return &Controller{
+ ConfigMap: configMap,
+ Namespace: namespace,
+ kubeConfig: rest,
+ logger: common.NewArgoEventsLogger(),
+ k8sClient: kubernetes.NewForConfigOrDie(rest),
+ gatewayClient: clientset.NewForConfigOrDie(rest),
+ queue: workqueue.NewRateLimitingQueue(rateLimiter),
}
}
-func (c *GatewayController) processNextItem() bool {
+// processNextItem processes a gateway resource on the controller's queue
+func (c *Controller) processNextItem() bool {
// Wait until there is a new item in the queue
key, quit := c.queue.Get()
if quit {
@@ -102,26 +97,26 @@ func (c *GatewayController) processNextItem() bool {
obj, exists, err := c.informer.GetIndexer().GetByKey(key.(string))
if err != nil {
- c.log.WithField(common.LabelGatewayName, key.(string)).WithError(err).Warn("failed to get gateway from informer index")
+ c.logger.WithField(common.LabelResourceName, key.(string)).WithError(err).Warnln("failed to get gateway from informer index")
return true
}
if !exists {
- // this happens after gateway-controller was deleted, but work queue still had entry in it
+ // this happens after the gateway was deleted, but the work queue still had an entry for it
return true
}
gw, ok := obj.(*v1alpha1.Gateway)
if !ok {
- c.log.WithField(common.LabelGatewayName, key.(string)).WithError(err).Warn("key in index is not a gateway")
+ c.logger.WithField(common.LabelResourceName, key.(string)).WithError(err).Warnln("key in index is not a gateway")
return true
}
- ctx := newGatewayOperationCtx(gw, c)
+ ctx := newGatewayContext(gw, c)
err = ctx.operate()
if err != nil {
- if err := common.GenerateK8sEvent(c.kubeClientset,
+ if err := common.GenerateK8sEvent(c.k8sClient,
fmt.Sprintf("controller failed to operate on gateway %s", gw.Name),
common.StateChangeEventType,
"controller operation failed",
@@ -130,25 +125,25 @@ func (c *GatewayController) processNextItem() bool {
c.Config.InstanceID,
gw.Kind,
map[string]string{
- common.LabelGatewayName: gw.Name,
- common.LabelEventType: string(common.EscalationEventType),
+ common.LabelResourceName: gw.Name,
+ common.LabelEventType: string(common.EscalationEventType),
},
); err != nil {
- ctx.log.WithError(err).Error("failed to create K8s event to escalate controller operation failure")
+ ctx.logger.WithError(err).Errorln("failed to create K8s event to escalate controller operation failure")
}
}
err = c.handleErr(err, key)
// create k8 event to escalate the error
if err != nil {
- ctx.log.WithError(err).Error("gateway controller failed to handle error")
+ ctx.logger.WithError(err).Errorln("controller failed to handle error")
}
return true
}
// handleErr checks if an error happened and make sure we will retry later
// returns an error if unable to handle the error
-func (c *GatewayController) handleErr(err error, key interface{}) error {
+func (c *Controller) handleErr(err error, key interface{}) error {
if err == nil {
// Forget about the #AddRateLimited history of key on every successful sync
// Ensure future updates for this key are not delayed because of outdated error history
@@ -160,7 +155,7 @@ func (c *GatewayController) handleErr(err error, key interface{}) error {
// requeues will happen very quickly even after a gateway pod goes down
// we want to give the event pod a chance to come back up so we give a generous number of retries
if c.queue.NumRequeues(key) < 20 {
- c.log.WithField(common.LabelGatewayName, key.(string)).WithError(err).Error("error syncing gateway")
+ c.logger.WithField(common.LabelResourceName, key.(string)).WithError(err).Errorln("error syncing gateway")
// Re-enqueue the key rate limited. This key will be processed later again.
c.queue.AddRateLimited(key)
@@ -169,63 +164,41 @@ func (c *GatewayController) handleErr(err error, key interface{}) error {
return errors.New("exceeded max requeues")
}
-// Run executes the gateway-controller
-func (c *GatewayController) Run(ctx context.Context, gwThreads, eventThreads int) {
+// Run starts the controller and processes the gateway resources on its queue
+func (c *Controller) Run(ctx context.Context, threads int) {
defer c.queue.ShutDown()
- c.log.WithFields(
+ c.logger.WithFields(
map[string]interface{}{
common.LabelInstanceID: c.Config.InstanceID,
common.LabelVersion: base.GetVersion().Version,
- }).Info("starting gateway-controller")
+ }).Infoln("starting controller")
+
_, err := c.watchControllerConfigMap(ctx)
if err != nil {
- c.log.WithError(err).Error("failed to register watch for gateway-controller config map")
- return
- }
-
- c.informer = c.newGatewayInformer()
- go c.informer.Run(ctx.Done())
-
- if !cache.WaitForCacheSync(ctx.Done(), c.informer.HasSynced) {
- c.log.Panicf("timed out waiting for the caches to sync for gateways")
+ c.logger.WithError(err).Errorln("failed to register watch for controller config map")
return
}
- listOptionsFunc := func(options *metav1.ListOptions) {
- labelSelector := labels.NewSelector().Add(c.instanceIDReq())
- options.LabelSelector = labelSelector.String()
- }
- factory := ccommon.ArgoEventInformerFactory{
- OwnerGroupVersionKind: v1alpha1.SchemaGroupVersionKind,
- OwnerInformer: c.informer,
- SharedInformerFactory: informers.NewFilteredSharedInformerFactory(c.kubeClientset, gatewayResourceResyncPeriod, c.Config.Namespace, listOptionsFunc),
- Queue: c.queue,
- }
-
- c.podInformer = factory.NewPodInformer()
- go c.podInformer.Informer().Run(ctx.Done())
-
- if !cache.WaitForCacheSync(ctx.Done(), c.podInformer.Informer().HasSynced) {
- c.log.Panic("timed out waiting for the caches to sync for gateway pods")
- return
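+ // build the gateway informer; the controller cannot run without it, so fail hard on error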
+ c.informer, err = c.newGatewayInformer()
+ if err != nil {
+ panic(err)
}
- c.svcInformer = factory.NewServiceInformer()
- go c.svcInformer.Informer().Run(ctx.Done())
+ go c.informer.Run(ctx.Done())
- if !cache.WaitForCacheSync(ctx.Done(), c.svcInformer.Informer().HasSynced) {
- c.log.Panic("timed out waiting for the caches to sync for gateway services")
+ if !cache.WaitForCacheSync(ctx.Done(), c.informer.HasSynced) {
+ c.logger.Errorln("timed out waiting for the caches to sync for gateways")
return
}
- for i := 0; i < gwThreads; i++ {
+ for i := 0; i < threads; i++ {
go wait.Until(c.runWorker, time.Second, ctx.Done())
}
<-ctx.Done()
}
-func (c *GatewayController) runWorker() {
+func (c *Controller) runWorker() {
for c.processNextItem() {
}
}
diff --git a/controllers/gateway/controller_test.go b/controllers/gateway/controller_test.go
index ab9f4cb918..c4278e2a78 100644
--- a/controllers/gateway/controller_test.go
+++ b/controllers/gateway/controller_test.go
@@ -19,99 +19,67 @@ package gateway
import (
"fmt"
"testing"
- "time"
"github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
fakegateway "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned/fake"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
+ "github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/informers"
- "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
- "k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
-func getFakePodSharedIndexInformer(clientset kubernetes.Interface) cache.SharedIndexInformer {
- // NewListWatchFromClient doesn't work with fake client.
- // ref: https://github.com/kubernetes/client-go/issues/352
- return cache.NewSharedIndexInformer(&cache.ListWatch{
- ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
- return clientset.CoreV1().Pods("").List(options)
- },
- WatchFunc: clientset.CoreV1().Pods("").Watch,
- }, &corev1.Pod{}, 1*time.Second, cache.Indexers{})
-}
-
-func getGatewayController() *GatewayController {
- clientset := fake.NewSimpleClientset()
- done := make(chan struct{})
- informer := getFakePodSharedIndexInformer(clientset)
- go informer.Run(done)
- factory := informers.NewSharedInformerFactory(clientset, 0)
- podInformer := factory.Core().V1().Pods()
- go podInformer.Informer().Run(done)
- svcInformer := factory.Core().V1().Services()
- go svcInformer.Informer().Run(done)
- return &GatewayController{
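+// newController returns a Controller wired to fake Kubernetes and gateway clients for use in tests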
+func newController() *Controller {
+ controller := &Controller{
ConfigMap: configmapName,
Namespace: common.DefaultControllerNamespace,
- Config: GatewayControllerConfig{
+ Config: ControllerConfig{
Namespace: common.DefaultControllerNamespace,
InstanceID: "argo-events",
},
- kubeClientset: clientset,
- gatewayClientset: fakegateway.NewSimpleClientset(),
- podInformer: podInformer,
- svcInformer: svcInformer,
- informer: informer,
- queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
- log: common.NewArgoEventsLogger(),
+ k8sClient: fake.NewSimpleClientset(),
+ gatewayClient: fakegateway.NewSimpleClientset(),
+ queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
+ logger: common.NewArgoEventsLogger(),
}
+ informer, err := controller.newGatewayInformer()
+ if err != nil {
+ panic(err)
+ }
+ controller.informer = informer
+ return controller
}
-func TestGatewayController(t *testing.T) {
- convey.Convey("Given a gateway controller, process queue items", t, func() {
- controller := getGatewayController()
-
- convey.Convey("Create a resource queue, add new item and process it", func() {
- controller.queue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
- controller.informer = controller.newGatewayInformer()
- controller.queue.Add("hi")
- res := controller.processNextItem()
+func TestGatewayController_ProcessNextItem(t *testing.T) {
+ controller := newController()
+ controller.queue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
+ gw := &v1alpha1.Gateway{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fake-gateway",
+ Namespace: common.DefaultControllerNamespace,
+ },
+ Spec: v1alpha1.GatewaySpec{},
+ }
+ err := controller.informer.GetIndexer().Add(gw)
+ assert.Nil(t, err)
- convey.Convey("Item from queue must be successfully processed", func() {
- convey.So(res, convey.ShouldBeTrue)
- })
+ controller.queue.Add("fake-gateway")
+ res := controller.processNextItem()
+ assert.Equal(t, res, true)
- convey.Convey("Shutdown queue and make sure queue does not process next item", func() {
- controller.queue.ShutDown()
- res := controller.processNextItem()
- convey.So(res, convey.ShouldBeFalse)
- })
- })
- })
+ controller.queue.ShutDown()
+ res = controller.processNextItem()
+ assert.Equal(t, res, false)
+}
- convey.Convey("Given a gateway controller, handle errors in queue", t, func() {
- controller := getGatewayController()
- convey.Convey("Create a resource queue and add an item", func() {
- controller.queue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
- controller.queue.Add("hi")
- convey.Convey("Handle an nil error", func() {
- err := controller.handleErr(nil, "hi")
- convey.So(err, convey.ShouldBeNil)
- })
- convey.Convey("Exceed max requeues", func() {
- controller.queue.Add("bye")
- var err error
- for i := 0; i < 21; i++ {
- err = controller.handleErr(fmt.Errorf("real error"), "bye")
- }
- convey.So(err, convey.ShouldNotBeNil)
- convey.So(err.Error(), convey.ShouldEqual, "exceeded max requeues")
- })
- })
- })
+func TestGatewayController_HandleErr(t *testing.T) {
+ controller := newController()
+ controller.queue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
+ controller.queue.Add("hi")
+ var err error
+ for i := 0; i < 21; i++ {
+ err = controller.handleErr(fmt.Errorf("real error"), "bye")
+ }
+ assert.NotNil(t, err)
+ assert.Equal(t, err.Error(), "exceeded max requeues")
}
diff --git a/controllers/gateway/informer.go b/controllers/gateway/informer.go
index e555f87ed2..45fc5302c9 100644
--- a/controllers/gateway/informer.go
+++ b/controllers/gateway/informer.go
@@ -17,48 +17,41 @@ limitations under the License.
package gateway
import (
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/tools/cache"
- "github.com/argoproj/argo-events/common"
gatewayinformers "github.com/argoproj/argo-events/pkg/client/gateway/informers/externalversions"
)
-func (c *GatewayController) instanceIDReq() labels.Requirement {
- // it makes sense to make instance id is mandatory.
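+// instanceIDReq returns the label requirement that selects gateways managed by this controller instance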
+func (c *Controller) instanceIDReq() (*labels.Requirement, error) {
if c.Config.InstanceID == "" {
panic("instance id is required")
}
- instanceIDReq, err := labels.NewRequirement(common.LabelKeyGatewayControllerInstanceID, selection.Equals, []string{c.Config.InstanceID})
+ instanceIDReq, err := labels.NewRequirement(LabelControllerInstanceID, selection.Equals, []string{c.Config.InstanceID})
if err != nil {
- panic(err)
+ return nil, err
}
- return *instanceIDReq
+ c.logger.WithField("instance-id", instanceIDReq.String()).Infoln("instance id requirement")
+ return instanceIDReq, nil
}
-func (c *GatewayController) versionReq() labels.Requirement {
- versionReq, err := labels.NewRequirement(common.LabelArgoEventsGatewayVersion, selection.Equals, []string{v1alpha1.ArgoEventsGatewayVersion})
+// newGatewayInformer returns a gateway informer that adds new gateways to the controller's queue based on Add, Update, and Delete event handlers for gateway resources
+func (c *Controller) newGatewayInformer() (cache.SharedIndexInformer, error) {
+ labelSelector, err := c.instanceIDReq()
if err != nil {
- panic(err)
+ return nil, err
}
- return *versionReq
-}
-
-// The gateway-controller informer adds new Gateways to the gateway-controller-controller's queue based on Add, Update, and Delete Event Handlers for the Gateway Resources
-func (c *GatewayController) newGatewayInformer() cache.SharedIndexInformer {
- gatewayInformerFactory := gatewayinformers.NewFilteredSharedInformerFactory(
- c.gatewayClientset,
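+ // scope the informer to the configured namespace and to gateways carrying this controller's instance-id label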
+ gatewayInformerFactory := gatewayinformers.NewSharedInformerFactoryWithOptions(
+ c.gatewayClient,
gatewayResyncPeriod,
- c.Config.Namespace,
- func(options *metav1.ListOptions) {
+ gatewayinformers.WithNamespace(c.Config.Namespace),
+ gatewayinformers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.Everything().String()
- labelSelector := labels.NewSelector().Add(c.instanceIDReq(), c.versionReq())
options.LabelSelector = labelSelector.String()
- },
+ }),
)
informer := gatewayInformerFactory.Argoproj().V1alpha1().Gateways().Informer()
informer.AddEventHandler(
@@ -83,5 +76,5 @@ func (c *GatewayController) newGatewayInformer() cache.SharedIndexInformer {
},
},
)
- return informer
+ return informer, nil
}
diff --git a/controllers/gateway/informer_test.go b/controllers/gateway/informer_test.go
index bc9e58fe47..17f18e8706 100644
--- a/controllers/gateway/informer_test.go
+++ b/controllers/gateway/informer_test.go
@@ -17,29 +17,29 @@ limitations under the License.
package gateway
import (
+ "fmt"
"testing"
- "github.com/argoproj/argo-events/common"
- "github.com/smartystreets/goconvey/convey"
+ "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ "github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/selection"
)
-func TestInformer(t *testing.T) {
- convey.Convey("Given a gateway controller", t, func() {
- controller := getGatewayController()
- convey.Convey("Instance ID required key must match", func() {
- req := controller.instanceIDReq()
- convey.So(req.Key(), convey.ShouldEqual, common.LabelKeyGatewayControllerInstanceID)
- convey.So(req.Operator(), convey.ShouldEqual, selection.Equals)
- convey.So(req.Values().Has("argo-events"), convey.ShouldBeTrue)
- })
- })
-
- convey.Convey("Given a gateway controller", t, func() {
- controller := getGatewayController()
- convey.Convey("Get a new gateway informer and make sure its not nil", func() {
- i := controller.newGatewayInformer()
- convey.So(i, convey.ShouldNotBeNil)
- })
- })
+func TestInformer_InstanceIDReq(t *testing.T) {
+ controller := newController()
+ req, err := controller.instanceIDReq()
+ assert.Nil(t, err)
+ assert.Equal(t, req.Key(), LabelControllerInstanceID)
+ assert.Equal(t, req.Operator(), selection.Equals)
+ assert.Equal(t, req.Values().Has("argo-events"), true)
+ assert.Equal(t, req.String(), fmt.Sprintf("%s=%s", LabelControllerInstanceID, "argo-events"))
+}
+
+func TestInformer_NewInformer(t *testing.T) {
+ controller := newController()
+ i, err := controller.newGatewayInformer()
+ assert.Nil(t, err)
+ assert.NotNil(t, i)
+ err = i.GetIndexer().Add(&v1alpha1.Gateway{})
+ assert.Nil(t, err)
}
diff --git a/controllers/gateway/operator.go b/controllers/gateway/operator.go
index da890be6f4..e4de8f960c 100644
--- a/controllers/gateway/operator.go
+++ b/controllers/gateway/operator.go
@@ -17,343 +17,207 @@ limitations under the License.
package gateway
import (
- "github.com/sirupsen/logrus"
"time"
- "github.com/pkg/errors"
-
"github.com/argoproj/argo-events/common"
"github.com/argoproj/argo-events/pkg/apis/gateway"
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- corev1 "k8s.io/api/core/v1"
+ gwclient "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned"
+ "github.com/sirupsen/logrus"
+ "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
)
-// the context of an operation on a gateway-controller.
-// the gateway-controller-controller creates this context each time it picks a Gateway off its queue.
-type gwOperationCtx struct {
- // gw is the gateway-controller object
- gw *v1alpha1.Gateway
- // updated indicates whether the gateway-controller object was updated and needs to be persisted back to k8
+// the context of an operation in the controller.
+// the controller creates this context each time it picks a Gateway off its queue.
+type gatewayContext struct {
+ // gateway is the gateway resource being operated on
+ gateway *v1alpha1.Gateway
+ // updated indicates whether the gateway object was updated and needs to be persisted back to k8s
updated bool
- // log is the logger for a gateway
- log *logrus.Logger
- // reference to the gateway-controller-controller
- controller *GatewayController
- // gwrctx is the context to handle child resource
- gwrctx gwResourceCtx
+ // logger is the logger for a gateway
+ logger *logrus.Logger
+ // reference to the controller
+ controller *Controller
}
-// newGatewayOperationCtx creates and initializes a new gOperationCtx object
-func newGatewayOperationCtx(gw *v1alpha1.Gateway, controller *GatewayController) *gwOperationCtx {
- gw = gw.DeepCopy()
- return &gwOperationCtx{
- gw: gw,
+// newGatewayContext creates and initializes a new gatewayContext object
+func newGatewayContext(gatewayObj *v1alpha1.Gateway, controller *Controller) *gatewayContext {
+ gatewayObj = gatewayObj.DeepCopy()
+ return &gatewayContext{
+ gateway: gatewayObj,
updated: false,
- log: common.NewArgoEventsLogger().WithFields(
+ logger: common.NewArgoEventsLogger().WithFields(
map[string]interface{}{
- common.LabelGatewayName: gw.Name,
- common.LabelNamespace: gw.Namespace,
+ common.LabelResourceName: gatewayObj.Name,
+ common.LabelNamespace: gatewayObj.Namespace,
}).Logger,
controller: controller,
- gwrctx: NewGatewayResourceContext(gw, controller),
}
}
// operate checks the status of gateway resource and takes action based on it.
-func (goc *gwOperationCtx) operate() error {
- defer func() {
- if goc.updated {
- var err error
- eventType := common.StateChangeEventType
- labels := map[string]string{
- common.LabelGatewayName: goc.gw.Name,
- common.LabelGatewayKeyPhase: string(goc.gw.Status.Phase),
- common.LabelKeyGatewayControllerInstanceID: goc.controller.Config.InstanceID,
- common.LabelOperation: "persist_gateway_state",
- }
- goc.gw, err = PersistUpdates(goc.controller.gatewayClientset, goc.gw, goc.log)
- if err != nil {
- goc.log.WithError(err).Error("failed to persist gateway update, escalating...")
- // escalate
- eventType = common.EscalationEventType
- }
+func (ctx *gatewayContext) operate() error {
+ defer ctx.updateGatewayState()
- labels[common.LabelEventType] = string(eventType)
- if err := common.GenerateK8sEvent(goc.controller.kubeClientset,
- "persist update",
- eventType,
- "gateway state update",
- goc.gw.Name,
- goc.gw.Namespace,
- goc.controller.Config.InstanceID,
- gateway.Kind,
- labels,
- ); err != nil {
- goc.log.WithError(err).Error("failed to create K8s event to log gateway state persist operation")
- return
- }
- goc.log.Info("successfully persisted gateway resource update and created K8s event")
- }
- goc.updated = false
- }()
+ ctx.logger.WithField(common.LabelPhase, string(ctx.gateway.Status.Phase)).Infoln("operating on the gateway...")
- goc.log.WithField(common.LabelPhase, string(goc.gw.Status.Phase)).Info("operating on the gateway")
+ if err := Validate(ctx.gateway); err != nil {
+ ctx.logger.WithError(err).Infoln("invalid gateway object")
+ return err
+ }
// check the state of a gateway and take actions accordingly
- switch goc.gw.Status.Phase {
+ switch ctx.gateway.Status.Phase {
case v1alpha1.NodePhaseNew:
- err := goc.createGatewayResources()
- if err != nil {
+ if err := ctx.createGatewayResources(); err != nil {
+ ctx.logger.WithError(err).Errorln("failed to create resources for the gateway")
+ ctx.markGatewayPhase(v1alpha1.NodePhaseError, err.Error())
return err
}
+ ctx.logger.Infoln("marking gateway as active")
+ ctx.markGatewayPhase(v1alpha1.NodePhaseRunning, "gateway is active")
- // Gateway is in error
- case v1alpha1.NodePhaseError:
- goc.log.Error("gateway is in error state. please check escalated K8 event for the error")
- err := goc.updateGatewayResources()
+ // Gateway is already running
+ case v1alpha1.NodePhaseRunning:
+ ctx.logger.Infoln("gateway is running")
+ err := ctx.updateGatewayResources()
if err != nil {
+ ctx.logger.WithError(err).Errorln("failed to update resources for the gateway")
+ ctx.markGatewayPhase(v1alpha1.NodePhaseError, err.Error())
return err
}
+ ctx.updated = true
- // Gateway is already running, do nothing
- case v1alpha1.NodePhaseRunning:
- goc.log.Info("gateway is running")
- err := goc.updateGatewayResources()
+ // Gateway is in error
+ case v1alpha1.NodePhaseError:
+ ctx.logger.Errorln("gateway is in error state. checking updates for gateway object...")
+ err := ctx.updateGatewayResources()
if err != nil {
+ ctx.logger.WithError(err).Errorln("failed to update resources for the gateway")
return err
}
+ ctx.markGatewayPhase(v1alpha1.NodePhaseRunning, "gateway is now active")
default:
- goc.log.WithField(common.LabelPhase, string(goc.gw.Status.Phase)).Panic("unknown gateway phase.")
+ ctx.logger.WithField(common.LabelPhase, string(ctx.gateway.Status.Phase)).Errorln("unknown gateway phase")
}
return nil
}
-func (goc *gwOperationCtx) createGatewayResources() error {
- err := Validate(goc.gw)
- if err != nil {
- goc.log.WithError(err).Error("gateway validation failed")
- err = errors.Wrap(err, "failed to validate gateway")
- goc.markGatewayPhase(v1alpha1.NodePhaseError, err.Error())
- return err
- }
- // Gateway pod has two components,
- // 1) Gateway Server - Listen events from event source and dispatches the event to gateway client
- // 2) Gateway Client - Listens for events from gateway server, convert them into cloudevents specification
- // compliant events and dispatch them to watchers.
- pod, err := goc.createGatewayPod()
- if err != nil {
- err = errors.Wrap(err, "failed to create gateway pod")
- goc.markGatewayPhase(v1alpha1.NodePhaseError, err.Error())
- return err
- }
- goc.log.WithField(common.LabelPodName, pod.Name).Info("gateway pod is created")
+// updateGatewayState updates the gateway state
+func (ctx *gatewayContext) updateGatewayState() {
+ if ctx.updated {
+ var err error
+ eventType := common.StateChangeEventType
+ labels := map[string]string{
+ common.LabelResourceName: ctx.gateway.Name,
+ LabelPhase: string(ctx.gateway.Status.Phase),
+ LabelControllerInstanceID: ctx.controller.Config.InstanceID,
+ common.LabelOperation: "persist_gateway_state",
+ }
- // expose gateway if service is configured
- if goc.gw.Spec.Service != nil {
- svc, err := goc.createGatewayService()
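+ // persist the gateway state and escalate through a K8s event if the update fails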
+ ctx.gateway, err = PersistUpdates(ctx.controller.gatewayClient, ctx.gateway, ctx.logger)
if err != nil {
- err = errors.Wrap(err, "failed to create gateway service")
- goc.markGatewayPhase(v1alpha1.NodePhaseError, err.Error())
- return err
+ ctx.logger.WithError(err).Errorln("failed to persist gateway update, escalating...")
+ eventType = common.EscalationEventType
}
- goc.log.WithField(common.LabelServiceName, svc.Name).Info("gateway service is created")
- }
- goc.log.Info("marking gateway as active")
- goc.markGatewayPhase(v1alpha1.NodePhaseRunning, "gateway is active")
- return nil
-}
-
-func (goc *gwOperationCtx) createGatewayPod() (*corev1.Pod, error) {
- pod, err := goc.gwrctx.newGatewayPod()
- if err != nil {
- goc.log.WithError(err).Error("failed to initialize pod for gateway")
- return nil, err
- }
- pod, err = goc.gwrctx.createGatewayPod(pod)
- if err != nil {
- goc.log.WithError(err).Error("failed to create pod for gateway")
- return nil, err
+ labels[common.LabelEventType] = string(eventType)
+ if err := common.GenerateK8sEvent(ctx.controller.k8sClient,
+ "persist update",
+ eventType,
+ "gateway state update",
+ ctx.gateway.Name,
+ ctx.gateway.Namespace,
+ ctx.controller.Config.InstanceID,
+ gateway.Kind,
+ labels,
+ ); err != nil {
+ ctx.logger.WithError(err).Errorln("failed to create K8s event to logger gateway state persist operation")
+ return
+ }
+ ctx.logger.Infoln("successfully persisted gateway resource update and created K8s event")
}
- return pod, nil
+ ctx.updated = false
}
-func (goc *gwOperationCtx) createGatewayService() (*corev1.Service, error) {
- svc, err := goc.gwrctx.newGatewayService()
- if err != nil {
- goc.log.WithError(err).Error("failed to initialize service for gateway")
- return nil, err
- }
- svc, err = goc.gwrctx.createGatewayService(svc)
- if err != nil {
- goc.log.WithError(err).Error("failed to create service for gateway")
- return nil, err
- }
- return svc, nil
-}
+// mark the gateway phase
+func (ctx *gatewayContext) markGatewayPhase(phase v1alpha1.NodePhase, message string) {
+ justCompleted := ctx.gateway.Status.Phase != phase
+ if justCompleted {
+ ctx.logger.WithFields(
+ map[string]interface{}{
+ "old": string(ctx.gateway.Status.Phase),
+ "new": string(phase),
+ },
+ ).Infoln("phase changed")
-func (goc *gwOperationCtx) updateGatewayResources() error {
- err := Validate(goc.gw)
- if err != nil {
- goc.log.WithError(err).Error("gateway validation failed")
- err = errors.Wrap(err, "failed to validate gateway")
- if goc.gw.Status.Phase != v1alpha1.NodePhaseError {
- goc.markGatewayPhase(v1alpha1.NodePhaseError, err.Error())
+ ctx.gateway.Status.Phase = phase
+ if ctx.gateway.ObjectMeta.Labels == nil {
+ ctx.gateway.ObjectMeta.Labels = make(map[string]string)
+ }
+ if ctx.gateway.Annotations == nil {
+ ctx.gateway.Annotations = make(map[string]string)
}
- return err
- }
- _, podChanged, err := goc.updateGatewayPod()
- if err != nil {
- err = errors.Wrap(err, "failed to update gateway pod")
- goc.markGatewayPhase(v1alpha1.NodePhaseError, err.Error())
- return err
+ ctx.gateway.ObjectMeta.Labels[LabelPhase] = string(phase)
+ // add annotations so a resource sensor can watch this gateway.
+ ctx.gateway.Annotations[LabelPhase] = string(phase)
}
- _, svcChanged, err := goc.updateGatewayService()
- if err != nil {
- err = errors.Wrap(err, "failed to update gateway service")
- goc.markGatewayPhase(v1alpha1.NodePhaseError, err.Error())
- return err
+ if ctx.gateway.Status.StartedAt.IsZero() {
+ ctx.gateway.Status.StartedAt = metav1.Time{Time: time.Now().UTC()}
}
- if goc.gw.Status.Phase != v1alpha1.NodePhaseRunning && (podChanged || svcChanged) {
- goc.markGatewayPhase(v1alpha1.NodePhaseRunning, "gateway is active")
- }
+ ctx.logger.WithFields(
+ map[string]interface{}{
+ "old": ctx.gateway.Status.Message,
+ "new": message,
+ },
+ ).Infoln("phase change message")
- return nil
+ ctx.gateway.Status.Message = message
+ ctx.updated = true
}
-func (goc *gwOperationCtx) updateGatewayPod() (*corev1.Pod, bool, error) {
- // Check if gateway spec has changed for pod.
- existingPod, err := goc.gwrctx.getGatewayPod()
- if err != nil {
- goc.log.WithError(err).Error("failed to get pod for gateway")
- return nil, false, err
- }
+// PersistUpdates persists the updates to the gateway resource
+func PersistUpdates(client gwclient.Interface, gw *v1alpha1.Gateway, log *logrus.Logger) (*v1alpha1.Gateway, error) {
+ gatewayClient := client.ArgoprojV1alpha1().Gateways(gw.ObjectMeta.Namespace)
- // create a new pod spec
- newPod, err := goc.gwrctx.newGatewayPod()
- if err != nil {
- goc.log.WithError(err).Error("failed to initialize pod for gateway")
- return nil, false, err
- }
+ // in case persist update fails
+ oldgw := gw.DeepCopy()
- // check if pod spec remained unchanged
- if existingPod != nil {
- if existingPod.Annotations != nil && existingPod.Annotations[common.AnnotationGatewayResourceSpecHashName] == newPod.Annotations[common.AnnotationGatewayResourceSpecHashName] {
- goc.log.WithField(common.LabelPodName, existingPod.Name).Debug("gateway pod spec unchanged")
- return nil, false, nil
+ gw, err := gatewayClient.Update(gw)
+ if err != nil {
+ log.WithError(err).Warn("error updating gateway")
+ if errors.IsConflict(err) {
+ return oldgw, err
}
-
- // By now we are sure that the spec changed, so lets go ahead and delete the exisitng gateway pod.
- goc.log.WithField(common.LabelPodName, existingPod.Name).Info("gateway pod spec changed")
-
- err := goc.gwrctx.deleteGatewayPod(existingPod)
+ log.Info("re-applying updates on latest version and retrying update")
+ err = ReapplyUpdates(client, gw)
if err != nil {
- goc.log.WithError(err).Error("failed to delete pod for gateway")
- return nil, false, err
+ log.WithError(err).Error("failed to re-apply update")
+ return oldgw, err
}
-
- goc.log.WithField(common.LabelPodName, existingPod.Name).Info("gateway pod is deleted")
}
-
- // Create new pod for updated gateway spec.
- createdPod, err := goc.gwrctx.createGatewayPod(newPod)
- if err != nil {
- goc.log.WithError(err).Error("failed to create pod for gateway")
- return nil, false, err
- }
- goc.log.WithError(err).WithField(common.LabelPodName, newPod.Name).Info("gateway pod is created")
-
- return createdPod, true, nil
+ log.WithField(common.LabelPhase, string(gw.Status.Phase)).Info("gateway state updated successfully")
+ return gw, nil
}
-func (goc *gwOperationCtx) updateGatewayService() (*corev1.Service, bool, error) {
- // Check if gateway spec has changed for service.
- existingSvc, err := goc.gwrctx.getGatewayService()
- if err != nil {
- goc.log.WithError(err).Error("failed to get service for gateway")
- return nil, false, err
- }
-
- // create a new service spec
- newSvc, err := goc.gwrctx.newGatewayService()
- if err != nil {
- goc.log.WithError(err).Error("failed to initialize service for gateway")
- return nil, false, err
- }
-
- if existingSvc != nil {
- // updated spec doesn't have service defined, delete existing service.
- if newSvc == nil {
- if err := goc.gwrctx.deleteGatewayService(existingSvc); err != nil {
- return nil, false, err
+// ReapplyUpdates reapplies the updates to the gateway resource with exponential backoff
+func ReapplyUpdates(client gwclient.Interface, gw *v1alpha1.Gateway) error {
+ return wait.ExponentialBackoff(common.DefaultRetry, func() (bool, error) {
+ gatewayClient := client.ArgoprojV1alpha1().Gateways(gw.Namespace)
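+ // retry the update until it succeeds; only a non-retryable API error aborts the backoff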
+ g, err := gatewayClient.Update(gw)
+ if err != nil {
+ if !common.IsRetryableKubeAPIError(err) {
+ return false, err
}
- return nil, true, nil
+ return false, nil
}
-
- // check if service spec remained unchanged
- if existingSvc.Annotations[common.AnnotationGatewayResourceSpecHashName] == newSvc.Annotations[common.AnnotationGatewayResourceSpecHashName] {
- goc.log.WithField(common.LabelServiceName, existingSvc.Name).Debug("gateway service spec unchanged")
- return nil, false, nil
- }
-
- // service spec changed, delete existing service and create new one
- goc.log.WithField(common.LabelServiceName, existingSvc.Name).Info("gateway service spec changed")
-
- if err := goc.gwrctx.deleteGatewayService(existingSvc); err != nil {
- return nil, false, err
- }
- } else if newSvc == nil {
- // gateway service doesn't exist originally
- return nil, false, nil
- }
-
- // change createGatewayService to take a service spec
- createdSvc, err := goc.gwrctx.createGatewayService(newSvc)
- if err != nil {
- goc.log.WithError(err).Error("failed to create service for gateway")
- return nil, false, err
- }
- goc.log.WithField(common.LabelServiceName, createdSvc.Name).Info("gateway service is created")
-
- return createdSvc, true, nil
-}
-
-// mark the overall gateway phase
-func (goc *gwOperationCtx) markGatewayPhase(phase v1alpha1.NodePhase, message string) {
- justCompleted := goc.gw.Status.Phase != phase
- if justCompleted {
- goc.log.WithFields(
- map[string]interface{}{
- "old": string(goc.gw.Status.Phase),
- "new": string(phase),
- },
- ).Info("phase changed")
-
- goc.gw.Status.Phase = phase
- if goc.gw.ObjectMeta.Labels == nil {
- goc.gw.ObjectMeta.Labels = make(map[string]string)
- }
- if goc.gw.Annotations == nil {
- goc.gw.Annotations = make(map[string]string)
- }
- goc.gw.ObjectMeta.Labels[common.LabelGatewayKeyPhase] = string(phase)
- // add annotations so a resource sensor can watch this gateway.
- goc.gw.Annotations[common.LabelGatewayKeyPhase] = string(phase)
- }
- if goc.gw.Status.StartedAt.IsZero() {
- goc.gw.Status.StartedAt = metav1.Time{Time: time.Now().UTC()}
- }
- goc.log.WithFields(
- map[string]interface{}{
- "old": string(goc.gw.Status.Message),
- "new": message,
- },
- ).Info("message")
- goc.gw.Status.Message = message
- goc.updated = true
+ gw = g
+ return true, nil
+ })
}
diff --git a/controllers/gateway/operator_test.go b/controllers/gateway/operator_test.go
index 09528a9a54..419f30d5be 100644
--- a/controllers/gateway/operator_test.go
+++ b/controllers/gateway/operator_test.go
@@ -19,337 +19,147 @@ package gateway
import (
"testing"
+ "github.com/argoproj/argo-events/common"
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
+ "github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/tools/cache"
)
-var testGatewayStr = `apiVersion: argoproj.io/v1alpha1
-kind: Gateway
-metadata:
- name: webhook-gateway
- namespace: argo-events
- labels:
- gateways.argoproj.io/gateway-controller-instanceid: argo-events
- gateway-name: "webhook-gateway"
- argo-events-gateway-version: v0.11
-spec:
- eventSource: "webhook-gateway-configmap"
- type: "webhook"
- processorPort: "9330"
- eventProtocol:
- type: "NATS"
- nats:
- url: "nats://nats.argo-events:4222"
- type: "Standard"
- eventVersion: "1.0"
- template:
- metadata:
- name: "webhook-gateway"
- labels:
- gateway-name: "webhook-gateway"
- spec:
- containers:
- - name: "gateway-client"
- image: "argoproj/gateway-client:v0.6.2"
- imagePullPolicy: "Always"
- command: ["/bin/gateway-client"]
- - name: "webhook-events"
- image: "argoproj/webhook-gateway:v0.6.2"
- imagePullPolicy: "Always"
- command: ["/bin/webhook-gateway"]
- serviceAccountName: "argo-events-sa"
- service:
- metadata:
- name: webhook-gateway-svc
- spec:
- selector:
- gateway-name: "webhook-gateway"
- ports:
- - port: 12000
- targetPort: 12000
- type: LoadBalancer`
-
-var (
- gatewayPodName = "webhook-gateway"
- gatewaySvcName = "webhook-gateway-svc"
-)
+func TestGatewayOperateLifecycle(t *testing.T) {
+ controller := newController()
+ ctx := newGatewayContext(gatewayObj.DeepCopy(), controller)
+ gateway, err := controller.gatewayClient.ArgoprojV1alpha1().Gateways(gatewayObj.Namespace).Create(gatewayObj)
+ assert.Nil(t, err)
+ assert.NotNil(t, gateway)
+
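+ // drive the gateway through the new, running, and error phases and verify the generated resources after each step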
+ tests := []struct {
+ name string
+ updateStateFunc func()
+ testFunc func(oldMetadata *v1alpha1.GatewayResource)
+ }{
+ {
+ name: "process a new gateway object",
+ updateStateFunc: func() {},
+ testFunc: func(oldMetadata *v1alpha1.GatewayResource) {
+ assert.Nil(t, oldMetadata)
+ deployment, err := controller.k8sClient.AppsV1().Deployments(ctx.gateway.Status.Resources.Deployment.Namespace).Get(ctx.gateway.Status.Resources.Deployment.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ service, err := controller.k8sClient.CoreV1().Services(ctx.gateway.Status.Resources.Service.Namespace).Get(ctx.gateway.Status.Resources.Service.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+ assert.NotNil(t, ctx.gateway.Status.Resources)
+ assert.Equal(t, ctx.gateway.Status.Resources.Deployment.Name, deployment.Name)
+ assert.Equal(t, ctx.gateway.Status.Resources.Service.Name, service.Name)
+ gateway, err := controller.gatewayClient.ArgoprojV1alpha1().Gateways(ctx.gateway.Namespace).Get(ctx.gateway.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, gateway)
+ assert.Equal(t, gateway.Status.Phase, v1alpha1.NodePhaseRunning)
+ ctx.gateway = gateway.DeepCopy()
+ },
+ },
+ {
+ name: "process a updated gateway object",
+ updateStateFunc: func() {
+ ctx.gateway.Spec.Template.Spec.Containers[0].Name = "new-name"
+ },
+ testFunc: func(oldMetadata *v1alpha1.GatewayResource) {
+ currentMetadata := ctx.gateway.Status.Resources.DeepCopy()
+ assert.NotEqual(t, oldMetadata.Deployment.Annotations[common.AnnotationResourceSpecHash], currentMetadata.Deployment.Annotations[common.AnnotationResourceSpecHash])
+ assert.Equal(t, oldMetadata.Service.Annotations[common.AnnotationResourceSpecHash], currentMetadata.Service.Annotations[common.AnnotationResourceSpecHash])
+ },
+ },
+ {
+ name: "process a gateway object in error",
+ updateStateFunc: func() {
+ ctx.gateway.Status.Phase = v1alpha1.NodePhaseError
+ ctx.gateway.Spec.Template.Spec.Containers[0].Name = "fixed-name"
+ },
+ testFunc: func(oldMetadata *v1alpha1.GatewayResource) {
+ currentMetadata := ctx.gateway.Status.Resources.DeepCopy()
+ assert.NotEqual(t, oldMetadata.Deployment.Annotations[common.AnnotationResourceSpecHash], currentMetadata.Deployment.Annotations[common.AnnotationResourceSpecHash])
+ assert.Equal(t, oldMetadata.Service.Annotations[common.AnnotationResourceSpecHash], currentMetadata.Service.Annotations[common.AnnotationResourceSpecHash])
+ gateway, err := controller.gatewayClient.ArgoprojV1alpha1().Gateways(ctx.gateway.Namespace).Get(ctx.gateway.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, gateway)
+ assert.Equal(t, gateway.Status.Phase, v1alpha1.NodePhaseRunning)
+ },
+ },
+ }
-func getGateway() (*v1alpha1.Gateway, error) {
- gwBytes := []byte(testGatewayStr)
- var gateway v1alpha1.Gateway
- err := yaml.Unmarshal(gwBytes, &gateway)
- return &gateway, err
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ oldMetadata := ctx.gateway.Status.Resources.DeepCopy()
+ test.updateStateFunc()
+ err := ctx.operate()
+ assert.Nil(t, err)
+ test.testFunc(oldMetadata)
+ })
+ }
}
-func waitForAllInformers(done chan struct{}, controller *GatewayController) {
- cache.WaitForCacheSync(done, controller.informer.HasSynced)
- cache.WaitForCacheSync(done, controller.podInformer.Informer().HasSynced)
- cache.WaitForCacheSync(done, controller.svcInformer.Informer().HasSynced)
+func TestPersistUpdates(t *testing.T) {
+ controller := newController()
+ ctx := newGatewayContext(gatewayObj.DeepCopy(), controller)
+ gateway, err := controller.gatewayClient.ArgoprojV1alpha1().Gateways(gatewayObj.Namespace).Create(gatewayObj)
+ assert.Nil(t, err)
+ assert.NotNil(t, gateway)
+
+ ctx.gateway = gateway
+ ctx.gateway.Spec.Template.Name = "updated-name"
+ gateway, err = PersistUpdates(controller.gatewayClient, ctx.gateway, controller.logger)
+ assert.Nil(t, err)
+ assert.Equal(t, gateway.Spec.Template.Name, "updated-name")
}
-func getPodAndService(controller *GatewayController, namespace string) (*corev1.Pod, *corev1.Service, error) {
- pod, err := controller.kubeClientset.CoreV1().Pods(namespace).Get(gatewayPodName, metav1.GetOptions{})
- if err != nil {
- return nil, nil, err
- }
- svc, err := controller.kubeClientset.CoreV1().Services(namespace).Get(gatewaySvcName, metav1.GetOptions{})
- if err != nil {
- return nil, nil, err
- }
- return pod, svc, err
+func TestReapplyUpdates(t *testing.T) {
+ controller := newController()
+ ctx := newGatewayContext(gatewayObj.DeepCopy(), controller)
+ gateway, err := controller.gatewayClient.ArgoprojV1alpha1().Gateways(gatewayObj.Namespace).Create(gatewayObj)
+ assert.Nil(t, err)
+ assert.NotNil(t, gateway)
+
+ ctx.gateway = gateway
+ ctx.gateway.Spec.Template.Name = "updated-name"
+ err = ReapplyUpdates(controller.gatewayClient, ctx.gateway)
+ assert.Nil(t, err)
+ assert.Equal(t, ctx.gateway.Spec.Template.Name, "updated-name")
}
-func deletePodAndService(controller *GatewayController, namespace string) error {
- err := controller.kubeClientset.CoreV1().Pods(namespace).Delete(gatewayPodName, &metav1.DeleteOptions{})
- if err != nil {
- return err
- }
- err = controller.kubeClientset.CoreV1().Services(namespace).Delete(gatewaySvcName, &metav1.DeleteOptions{})
- return err
+func TestOperator_MarkPhase(t *testing.T) {
+ controller := newController()
+ ctx := newGatewayContext(gatewayObj.DeepCopy(), controller)
+ assert.Equal(t, ctx.gateway.Status.Phase, v1alpha1.NodePhaseNew)
+ assert.Equal(t, ctx.gateway.Status.Message, "")
+ ctx.gateway.Status.Phase = v1alpha1.NodePhaseRunning
+ ctx.markGatewayPhase(v1alpha1.NodePhaseRunning, "node is active")
+ assert.Equal(t, ctx.gateway.Status.Phase, v1alpha1.NodePhaseRunning)
+ assert.Equal(t, ctx.gateway.Status.Message, "node is active")
}
-func TestGatewayOperateLifecycle(t *testing.T) {
- done := make(chan struct{})
- convey.Convey("Given a gateway resource spec, parse it", t, func() {
- fakeController := getGatewayController()
- gateway, err := getGateway()
- convey.Convey("Make sure no error occurs", func() {
- convey.So(err, convey.ShouldBeNil)
-
- convey.Convey("Create the gateway", func() {
- gateway, err = fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(fakeController.Config.Namespace).Create(gateway)
-
- convey.Convey("No error should occur and gateway resource should not be empty", func() {
- convey.So(err, convey.ShouldBeNil)
- convey.So(gateway, convey.ShouldNotBeNil)
-
- convey.Convey("Create a new gateway operation context", func() {
- goc := newGatewayOperationCtx(gateway, fakeController)
- convey.So(goc, convey.ShouldNotBeNil)
-
- convey.Convey("Operate on new gateway", func() {
- goc.markGatewayPhase(v1alpha1.NodePhaseNew, "test")
- err := goc.operate()
-
- convey.Convey("Operation must succeed", func() {
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, fakeController)
-
- convey.Convey("A gateway pod and service must be created", func() {
- gatewayPod, gatewaySvc, err := getPodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gatewayPod, convey.ShouldNotBeNil)
- convey.So(gatewaySvc, convey.ShouldNotBeNil)
-
- convey.Convey("Go to running state", func() {
- gateway, err := fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Get(gateway.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(gateway.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseRunning)
- })
- })
- })
- })
-
- convey.Convey("Operate on gateway in running state", func() {
- err := fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Delete(gateway.Name, &metav1.DeleteOptions{})
- convey.So(err, convey.ShouldBeNil)
- gateway, err = fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Create(gateway)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gateway, convey.ShouldNotBeNil)
-
- goc.markGatewayPhase(v1alpha1.NodePhaseNew, "test")
-
- // Operate it once to create pod and service
- waitForAllInformers(done, fakeController)
- err = goc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, fakeController)
-
- convey.Convey("Operation must succeed", func() {
- goc.markGatewayPhase(v1alpha1.NodePhaseRunning, "test")
-
- waitForAllInformers(done, fakeController)
- err := goc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, fakeController)
-
- convey.Convey("Untouch pod and service", func() {
- gatewayPod, gatewaySvc, err := getPodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gatewayPod, convey.ShouldNotBeNil)
- convey.So(gatewaySvc, convey.ShouldNotBeNil)
-
- convey.Convey("Stay in running state", func() {
- gateway, err := fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Get(gateway.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(gateway.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseRunning)
- })
- })
- })
-
- convey.Convey("Delete pod and service", func() {
- err := deletePodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
-
- convey.Convey("Operation must succeed", func() {
- goc.markGatewayPhase(v1alpha1.NodePhaseRunning, "test")
-
- waitForAllInformers(done, fakeController)
- err := goc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, fakeController)
-
- convey.Convey("Create pod and service", func() {
- gatewayPod, gatewaySvc, err := getPodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gatewayPod, convey.ShouldNotBeNil)
- convey.So(gatewaySvc, convey.ShouldNotBeNil)
-
- convey.Convey("Stay in running state", func() {
- gateway, err := fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Get(gateway.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(gateway.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseRunning)
- })
- })
- })
- })
-
- convey.Convey("Change pod and service spec", func() {
- goc.gwrctx.gw.Spec.Template.Spec.RestartPolicy = "Never"
- goc.gwrctx.gw.Spec.Service.Spec.ClusterIP = "127.0.0.1"
-
- gatewayPod, gatewaySvc, err := getPodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gatewayPod, convey.ShouldNotBeNil)
- convey.So(gatewaySvc, convey.ShouldNotBeNil)
-
- convey.Convey("Operation must succeed", func() {
- goc.markGatewayPhase(v1alpha1.NodePhaseRunning, "test")
-
- waitForAllInformers(done, fakeController)
- err := goc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, fakeController)
-
- convey.Convey("Delete pod and service", func() {
- gatewayPod, gatewaySvc, err := getPodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gatewayPod.Spec.RestartPolicy, convey.ShouldEqual, "Never")
- convey.So(gatewaySvc.Spec.ClusterIP, convey.ShouldEqual, "127.0.0.1")
-
- convey.Convey("Stay in running state", func() {
- gateway, err := fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Get(gateway.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(gateway.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseRunning)
- })
- })
- })
- })
- })
-
- convey.Convey("Operate on gateway in error state", func() {
- err := fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Delete(gateway.Name, &metav1.DeleteOptions{})
- convey.So(err, convey.ShouldBeNil)
- gateway, err = fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Create(gateway)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gateway, convey.ShouldNotBeNil)
-
- goc.markGatewayPhase(v1alpha1.NodePhaseNew, "test")
-
- // Operate it once to create pod and service
- waitForAllInformers(done, fakeController)
- err = goc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, fakeController)
-
- convey.Convey("Operation must succeed", func() {
- goc.markGatewayPhase(v1alpha1.NodePhaseError, "test")
-
- waitForAllInformers(done, fakeController)
- err := goc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, fakeController)
-
- convey.Convey("Untouch pod and service", func() {
- gatewayPod, gatewaySvc, err := getPodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gatewayPod, convey.ShouldNotBeNil)
- convey.So(gatewaySvc, convey.ShouldNotBeNil)
-
- convey.Convey("Stay in error state", func() {
- gateway, err := fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Get(gateway.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(gateway.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseError)
- })
- })
- })
-
- convey.Convey("Delete pod and service", func() {
- err := deletePodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
-
- convey.Convey("Operation must succeed", func() {
- goc.markGatewayPhase(v1alpha1.NodePhaseError, "test")
-
- waitForAllInformers(done, fakeController)
- err := goc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, fakeController)
-
- convey.Convey("Create pod and service", func() {
- gatewayPod, gatewaySvc, err := getPodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gatewayPod, convey.ShouldNotBeNil)
- convey.So(gatewaySvc, convey.ShouldNotBeNil)
-
- convey.Convey("Go to running state", func() {
- gateway, err := fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Get(gateway.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(gateway.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseRunning)
- })
- })
- })
- })
-
- convey.Convey("Change pod and service spec", func() {
- goc.gwrctx.gw.Spec.Template.Spec.RestartPolicy = "Never"
- goc.gwrctx.gw.Spec.Service.Spec.ClusterIP = "127.0.0.1"
-
- gatewayPod, gatewaySvc, err := getPodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gatewayPod, convey.ShouldNotBeNil)
- convey.So(gatewaySvc, convey.ShouldNotBeNil)
-
- convey.Convey("Operation must succeed", func() {
- goc.markGatewayPhase(v1alpha1.NodePhaseError, "test")
-
- waitForAllInformers(done, fakeController)
- err := goc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, fakeController)
-
- convey.Convey("Delete pod and service", func() {
- gatewayPod, gatewaySvc, err := getPodAndService(fakeController, gateway.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gatewayPod.Spec.RestartPolicy, convey.ShouldEqual, "Never")
- convey.So(gatewaySvc.Spec.ClusterIP, convey.ShouldEqual, "127.0.0.1")
-
- convey.Convey("Go to running state", func() {
- gateway, err := fakeController.gatewayClientset.ArgoprojV1alpha1().Gateways(gateway.Namespace).Get(gateway.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(gateway.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseRunning)
- })
- })
- })
- })
- })
- })
- })
- })
- })
- })
+func TestOperator_UpdateGatewayState(t *testing.T) {
+ controller := newController()
+ ctx := newGatewayContext(gatewayObj.DeepCopy(), controller)
+ gateway, err := controller.gatewayClient.ArgoprojV1alpha1().Gateways(gatewayObj.Namespace).Create(gatewayObj)
+ assert.Nil(t, err)
+ assert.NotNil(t, gateway)
+ ctx.gateway = gateway.DeepCopy()
+ assert.Equal(t, ctx.gateway.Status.Phase, v1alpha1.NodePhaseNew)
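+	// With ctx.updated set to true, updateGatewayState is expected to persist the phase change below.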
+ ctx.gateway.Status.Phase = v1alpha1.NodePhaseRunning
+ ctx.updated = true
+ ctx.updateGatewayState()
+ gateway, err = controller.gatewayClient.ArgoprojV1alpha1().Gateways(ctx.gateway.Namespace).Get(ctx.gateway.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, gateway)
+ ctx.gateway = gateway.DeepCopy()
+ assert.Equal(t, ctx.gateway.Status.Phase, v1alpha1.NodePhaseRunning)
+
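+	// With ctx.updated set to false, the change to Error below must not be persisted; the stored gateway remains Running.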
+ ctx.gateway.Status.Phase = v1alpha1.NodePhaseError
+ ctx.updated = false
+ ctx.updateGatewayState()
+ gateway, err = controller.gatewayClient.ArgoprojV1alpha1().Gateways(ctx.gateway.Namespace).Get(ctx.gateway.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, gateway)
+ ctx.gateway = gateway.DeepCopy()
+ assert.Equal(t, ctx.gateway.Status.Phase, v1alpha1.NodePhaseRunning)
}
diff --git a/controllers/gateway/resource.go b/controllers/gateway/resource.go
index 4f59d794c2..6603ef9678 100644
--- a/controllers/gateway/resource.go
+++ b/controllers/gateway/resource.go
@@ -1,169 +1,248 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package gateway
import (
"github.com/argoproj/argo-events/common"
controllerscommon "github.com/argoproj/argo-events/controllers/common"
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ "github.com/pkg/errors"
+ appv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/selection"
)
-type gwResourceCtx struct {
- // gw is the gateway-controller object
- gw *v1alpha1.Gateway
- // reference to the gateway-controller-controller
- controller *GatewayController
-
- controllerscommon.ChildResourceContext
+// buildServiceResource builds a new service that exposes the gateway.
+func (ctx *gatewayContext) buildServiceResource() (*corev1.Service, error) {
+ if ctx.gateway.Spec.Service == nil {
+ return nil, nil
+ }
+ service := ctx.gateway.Spec.Service.DeepCopy()
+ if err := controllerscommon.SetObjectMeta(ctx.gateway, service, v1alpha1.SchemaGroupVersionKind); err != nil {
+ return nil, err
+ }
+ return service, nil
}
-// NewGatewayResourceContext returns new gwResourceCtx
-func NewGatewayResourceContext(gw *v1alpha1.Gateway, controller *GatewayController) gwResourceCtx {
- return gwResourceCtx{
- gw: gw,
- controller: controller,
- ChildResourceContext: controllerscommon.ChildResourceContext{
- SchemaGroupVersionKind: v1alpha1.SchemaGroupVersionKind,
- LabelOwnerName: common.LabelGatewayName,
- LabelKeyOwnerControllerInstanceID: common.LabelKeyGatewayControllerInstanceID,
- AnnotationOwnerResourceHashName: common.AnnotationGatewayResourceSpecHashName,
- InstanceID: controller.Config.InstanceID,
+// buildDeploymentResource builds a deployment resource for the gateway
+func (ctx *gatewayContext) buildDeploymentResource() (*appv1.Deployment, error) {
+ if ctx.gateway.Spec.Template == nil {
+ return nil, errors.New("gateway template can't be empty")
+ }
+
+ podTemplate := ctx.gateway.Spec.Template.DeepCopy()
+
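+	// Default to a single replica when the gateway spec does not set one.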
+ replica := int32(ctx.gateway.Spec.Replica)
+ if replica == 0 {
+ replica = 1
+ }
+
+ deployment := &appv1.Deployment{
+ ObjectMeta: podTemplate.ObjectMeta,
+ Spec: appv1.DeploymentSpec{
+ Replicas: &replica,
+ Template: *podTemplate,
},
}
+
+ if deployment.Spec.Template.Labels == nil {
+ deployment.Spec.Template.Labels = map[string]string{}
+ }
+ deployment.Spec.Template.Labels[common.LabelObjectName] = ctx.gateway.Name
+
+ if deployment.Spec.Selector == nil {
+ deployment.Spec.Selector = &metav1.LabelSelector{
+ MatchLabels: map[string]string{},
+ }
+ }
+ deployment.Spec.Selector.MatchLabels[common.LabelObjectName] = ctx.gateway.Name
+
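+	// Environment variables injected into every container of the gateway deployment.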
+ envVars := []corev1.EnvVar{
+ {
+ Name: common.EnvVarNamespace,
+ Value: ctx.gateway.Namespace,
+ },
+ {
+ Name: common.EnvVarEventSource,
+ Value: ctx.gateway.Spec.EventSourceRef.Name,
+ },
+ {
+ Name: common.EnvVarResourceName,
+ Value: ctx.gateway.Name,
+ },
+ {
+ Name: common.EnvVarControllerInstanceID,
+ Value: ctx.controller.Config.InstanceID,
+ },
+ {
+ Name: common.EnvVarGatewayServerPort,
+ Value: ctx.gateway.Spec.ProcessorPort,
+ },
+ }
+
+ for i, container := range deployment.Spec.Template.Spec.Containers {
+ container.Env = append(container.Env, envVars...)
+ deployment.Spec.Template.Spec.Containers[i] = container
+ }
+
+ if err := controllerscommon.SetObjectMeta(ctx.gateway, deployment, v1alpha1.SchemaGroupVersionKind); err != nil {
+ return nil, errors.Wrap(err, "failed to set the object metadata on the deployment object")
+ }
+
+ return deployment, nil
}
-// gatewayResourceLabelSelector returns label selector of the gateway of the context
-func (grc *gwResourceCtx) gatewayResourceLabelSelector() (labels.Selector, error) {
- req, err := labels.NewRequirement(common.LabelGatewayName, selection.Equals, []string{grc.gw.Name})
+// createGatewayResources creates gateway deployment and service
+func (ctx *gatewayContext) createGatewayResources() error {
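+	// Make sure the resources status holder exists before recording the created objects.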
+ if ctx.gateway.Status.Resources == nil {
+ ctx.gateway.Status.Resources = &v1alpha1.GatewayResource{}
+ }
+
+ deployment, err := ctx.createGatewayDeployment()
if err != nil {
- return nil, err
+ return err
}
- return labels.NewSelector().Add(*req), nil
-}
+ ctx.gateway.Status.Resources.Deployment = &deployment.ObjectMeta
+ ctx.logger.WithField("name", deployment.Name).WithField("namespace", deployment.Namespace).Infoln("gateway deployment is created")
-// createGatewayService creates a given service
-func (grc *gwResourceCtx) createGatewayService(svc *corev1.Service) (*corev1.Service, error) {
- return grc.controller.kubeClientset.CoreV1().Services(grc.gw.Namespace).Create(svc)
-}
+ if ctx.gateway.Spec.Service != nil {
+ service, err := ctx.createGatewayService()
+ if err != nil {
+ return err
+ }
+ ctx.gateway.Status.Resources.Service = &service.ObjectMeta
+ ctx.logger.WithField("name", service.Name).WithField("namespace", service.Namespace).Infoln("gateway service is created")
+ }
-// deleteGatewayService deletes a given service
-func (grc *gwResourceCtx) deleteGatewayService(svc *corev1.Service) error {
- return grc.controller.kubeClientset.CoreV1().Services(grc.gw.Namespace).Delete(svc.Name, &metav1.DeleteOptions{})
+ return nil
}
-// getGatewayService returns the service of gateway
-func (grc *gwResourceCtx) getGatewayService() (*corev1.Service, error) {
- selector, err := grc.gatewayResourceLabelSelector()
+// createGatewayDeployment creates a deployment for the gateway
+func (ctx *gatewayContext) createGatewayDeployment() (*appv1.Deployment, error) {
+ deployment, err := ctx.buildDeploymentResource()
if err != nil {
return nil, err
}
- svcs, err := grc.controller.svcInformer.Lister().Services(grc.gw.Namespace).List(selector)
+ return ctx.controller.k8sClient.AppsV1().Deployments(deployment.Namespace).Create(deployment)
+}
+
+// createGatewayService creates a service for the gateway
+func (ctx *gatewayContext) createGatewayService() (*corev1.Service, error) {
+ svc, err := ctx.buildServiceResource()
if err != nil {
return nil, err
}
- if len(svcs) == 0 {
- return nil, nil
- }
- return svcs[0], nil
+ return ctx.controller.k8sClient.CoreV1().Services(svc.Namespace).Create(svc)
}
-// newGatewayService returns a new service that exposes gateway.
-func (grc *gwResourceCtx) newGatewayService() (*corev1.Service, error) {
- servicTemplateSpec := grc.gw.Spec.Service.DeepCopy()
- if servicTemplateSpec == nil {
- return nil, nil
+// updateGatewayResources updates gateway deployment and service
+func (ctx *gatewayContext) updateGatewayResources() error {
+ deployment, err := ctx.updateGatewayDeployment()
+ if err != nil {
+ return err
}
- service := &corev1.Service{
- ObjectMeta: servicTemplateSpec.ObjectMeta,
- Spec: servicTemplateSpec.Spec,
+ if deployment != nil {
+ ctx.gateway.Status.Resources.Deployment = &deployment.ObjectMeta
+ ctx.logger.WithField("name", deployment.Name).WithField("namespace", deployment.Namespace).Infoln("gateway deployment is updated")
}
- if service.Namespace == "" {
- service.Namespace = grc.gw.Namespace
+
+ service, err := ctx.updateGatewayService()
+ if err != nil {
+ return err
}
- if service.Name == "" {
- service.Name = common.DefaultServiceName(grc.gw.Name)
+ if service != nil {
+ ctx.gateway.Status.Resources.Service = &service.ObjectMeta
+ ctx.logger.WithField("name", service.Name).WithField("namespace", service.Namespace).Infoln("gateway service is updated")
+ return nil
}
- err := grc.SetObjectMeta(grc.gw, service)
- return service, err
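+	// A nil service means the service template was removed; clear the recorded service metadata.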
+ ctx.gateway.Status.Resources.Service = nil
+ return nil
}
-// getGatewayPod returns the pod of gateway
-func (grc *gwResourceCtx) getGatewayPod() (*corev1.Pod, error) {
- selector, err := grc.gatewayResourceLabelSelector()
+// updateGatewayDeployment updates the gateway deployment
+func (ctx *gatewayContext) updateGatewayDeployment() (*appv1.Deployment, error) {
+ newDeployment, err := ctx.buildDeploymentResource()
if err != nil {
return nil, err
}
- pods, err := grc.controller.podInformer.Lister().Pods(grc.gw.Namespace).List(selector)
+
+ currentMetadata := ctx.gateway.Status.Resources.Deployment
+ if currentMetadata == nil {
+ return nil, errors.New("deployment metadata is expected to be set in gateway object")
+ }
+
+ currentDeployment, err := ctx.controller.k8sClient.AppsV1().Deployments(currentMetadata.Namespace).Get(currentMetadata.Name, metav1.GetOptions{})
if err != nil {
+ if apierr.IsNotFound(err) {
+ return ctx.controller.k8sClient.AppsV1().Deployments(newDeployment.Namespace).Create(newDeployment)
+ }
return nil, err
}
- if len(pods) == 0 {
- return nil, nil
+
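+	// A changed resource spec hash means the gateway template changed; delete the stale deployment and recreate it.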
+ if currentDeployment.Annotations != nil && currentDeployment.Annotations[common.AnnotationResourceSpecHash] != newDeployment.Annotations[common.AnnotationResourceSpecHash] {
+ if err := ctx.controller.k8sClient.AppsV1().Deployments(currentDeployment.Namespace).Delete(currentDeployment.Name, &metav1.DeleteOptions{}); err != nil {
+ return nil, err
+ }
+ return ctx.controller.k8sClient.AppsV1().Deployments(newDeployment.Namespace).Create(newDeployment)
}
- return pods[0], nil
-}
-// createGatewayPod creates a given pod
-func (grc *gwResourceCtx) createGatewayPod(pod *corev1.Pod) (*corev1.Pod, error) {
- return grc.controller.kubeClientset.CoreV1().Pods(grc.gw.Namespace).Create(pod)
+ return nil, nil
}
-// deleteGatewayPod deletes a given pod
-func (grc *gwResourceCtx) deleteGatewayPod(pod *corev1.Pod) error {
- return grc.controller.kubeClientset.CoreV1().Pods(grc.gw.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
-}
+// updateGatewayService updates the gateway service
+func (ctx *gatewayContext) updateGatewayService() (*corev1.Service, error) {
+ newService, err := ctx.buildServiceResource()
+ if err != nil {
+ return nil, err
+ }
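+	// The service template was removed from the gateway spec; delete the service that was created earlier.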
+ if newService == nil && ctx.gateway.Status.Resources.Service != nil {
+ if err := ctx.controller.k8sClient.CoreV1().Services(ctx.gateway.Status.Resources.Service.Namespace).Delete(ctx.gateway.Status.Resources.Service.Name, &metav1.DeleteOptions{}); err != nil {
+ return nil, err
+ }
+ return nil, nil
+ }
-// newGatewayPod returns a new pod of gateway
-func (grc *gwResourceCtx) newGatewayPod() (*corev1.Pod, error) {
- podTemplateSpec := grc.gw.Spec.Template.DeepCopy()
- pod := &corev1.Pod{
- ObjectMeta: podTemplateSpec.ObjectMeta,
- Spec: podTemplateSpec.Spec,
+ if newService == nil {
+ return nil, nil
}
- if pod.Namespace == "" {
- pod.Namespace = grc.gw.Namespace
+
+ if ctx.gateway.Status.Resources.Service == nil {
+ return ctx.controller.k8sClient.CoreV1().Services(newService.Namespace).Create(newService)
}
- if pod.Name == "" {
- pod.Name = grc.gw.Name
+
+ currentMetadata := ctx.gateway.Status.Resources.Service
+ currentService, err := ctx.controller.k8sClient.CoreV1().Services(currentMetadata.Namespace).Get(currentMetadata.Name, metav1.GetOptions{})
+ if err != nil {
+ return ctx.controller.k8sClient.CoreV1().Services(newService.Namespace).Create(newService)
}
- grc.setupContainersForGatewayPod(pod)
- err := grc.SetObjectMeta(grc.gw, pod)
- return pod, err
-}
-// containers required for gateway deployment
-func (grc *gwResourceCtx) setupContainersForGatewayPod(pod *corev1.Pod) {
- // env variables
- envVars := []corev1.EnvVar{
- {
- Name: common.EnvVarGatewayNamespace,
- Value: grc.gw.Namespace,
- },
- {
- Name: common.EnvVarGatewayEventSourceConfigMap,
- Value: grc.gw.Spec.EventSource,
- },
- {
- Name: common.EnvVarGatewayName,
- Value: grc.gw.Name,
- },
- {
- Name: common.EnvVarGatewayControllerInstanceID,
- Value: grc.controller.Config.InstanceID,
- },
- {
- Name: common.EnvVarGatewayControllerName,
- Value: common.LabelGatewayControllerName,
- },
- {
- Name: common.EnvVarGatewayServerPort,
- Value: grc.gw.Spec.ProcessorPort,
- },
+ if currentMetadata == nil {
+ return nil, errors.New("service metadata is expected to be set in gateway object")
}
- for i, container := range pod.Spec.Containers {
- container.Env = append(container.Env, envVars...)
- pod.Spec.Containers[i] = container
+
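+	// Recreate the service when its recorded spec hash no longer matches the newly built service.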
+ if currentService.Annotations != nil && currentService.Annotations[common.AnnotationResourceSpecHash] != newService.Annotations[common.AnnotationResourceSpecHash] {
+ if err := ctx.controller.k8sClient.CoreV1().Services(currentMetadata.Namespace).Delete(currentMetadata.Name, &metav1.DeleteOptions{}); err != nil {
+ return nil, err
+ }
+ if ctx.gateway.Spec.Service != nil {
+ return ctx.controller.k8sClient.CoreV1().Services(newService.Namespace).Create(newService)
+ }
}
+
+ return currentService, nil
}
diff --git a/controllers/gateway/resource_test.go b/controllers/gateway/resource_test.go
new file mode 100644
index 0000000000..553d6af09a
--- /dev/null
+++ b/controllers/gateway/resource_test.go
@@ -0,0 +1,316 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gateway
+
+import (
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ "github.com/stretchr/testify/assert"
+ corev1 "k8s.io/api/core/v1"
+ apierror "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+var gatewayObj = &v1alpha1.Gateway{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fake-gateway",
+ Namespace: common.DefaultControllerNamespace,
+ },
+ Spec: v1alpha1.GatewaySpec{
+ EventSourceRef: &v1alpha1.EventSourceRef{
+ Name: "fake-event-source",
+ },
+ Replica: 1,
+ Type: apicommon.WebhookEvent,
+ ProcessorPort: "8080",
+ Template: &corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "webhook-gateway",
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "gateway-client",
+ Image: "argoproj/gateway-client",
+ ImagePullPolicy: corev1.PullAlways,
+ },
+ {
+ Name: "gateway-server",
+ ImagePullPolicy: corev1.PullAlways,
+ Image: "argoproj/webhook-gateway",
+ },
+ },
+ },
+ },
+ Service: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "webhook-gateway-svc",
+ },
+ Spec: corev1.ServiceSpec{
+ Type: corev1.ServiceTypeLoadBalancer,
+ Selector: map[string]string{
+ "gateway-name": "webhook-gateway",
+ },
+ Ports: []corev1.ServicePort{
+ {
+ Name: "server-port",
+ Port: 12000,
+ TargetPort: intstr.FromInt(12000),
+ },
+ },
+ },
+ },
+ EventProtocol: &apicommon.EventProtocol{
+ Type: apicommon.HTTP,
+ Http: apicommon.Http{
+ Port: "9330",
+ },
+ },
+ Watchers: &v1alpha1.NotificationWatchers{
+ Sensors: []v1alpha1.SensorNotificationWatcher{
+ {
+ Name: "fake-sensor",
+ Namespace: common.DefaultControllerNamespace,
+ },
+ },
+ },
+ },
+}
+
+func TestResource_BuildServiceResource(t *testing.T) {
+ controller := newController()
+ opCtx := newGatewayContext(gatewayObj, controller)
+ svc := opCtx.gateway.Spec.Service.DeepCopy()
+ opCtx.gateway.Spec.Service = nil
+
+ // If no service is defined
+ service, err := opCtx.buildServiceResource()
+ assert.Nil(t, err)
+ assert.Nil(t, service)
+ opCtx.gateway.Spec.Service = svc
+
+ // If service is defined
+ service, err = opCtx.buildServiceResource()
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+
+ opCtx.gateway.Spec.Service.Name = ""
+ opCtx.gateway.Spec.Service.Namespace = ""
+
+ service, err = opCtx.buildServiceResource()
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+ assert.Equal(t, service.Name, opCtx.gateway.Name)
+ assert.Equal(t, service.Namespace, opCtx.gateway.Namespace)
+
+ newSvc, err := controller.k8sClient.CoreV1().Services(service.Namespace).Create(service)
+ assert.Nil(t, err)
+ assert.NotNil(t, newSvc)
+ assert.Equal(t, newSvc.Name, opCtx.gateway.Name)
+ assert.Equal(t, len(newSvc.Spec.Ports), 1)
+ assert.Equal(t, newSvc.Spec.Type, corev1.ServiceTypeLoadBalancer)
+}
+
+func TestResource_BuildDeploymentResource(t *testing.T) {
+ controller := newController()
+ ctx := newGatewayContext(gatewayObj, controller)
+ deployment, err := ctx.buildDeploymentResource()
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+
+ for _, container := range deployment.Spec.Template.Spec.Containers {
+ assert.NotNil(t, container.Env)
+ assert.Equal(t, container.Env[0].Name, common.EnvVarNamespace)
+ assert.Equal(t, container.Env[0].Value, ctx.gateway.Namespace)
+ assert.Equal(t, container.Env[1].Name, common.EnvVarEventSource)
+ assert.Equal(t, container.Env[1].Value, ctx.gateway.Spec.EventSourceRef.Name)
+ assert.Equal(t, container.Env[2].Name, common.EnvVarResourceName)
+ assert.Equal(t, container.Env[2].Value, ctx.gateway.Name)
+ assert.Equal(t, container.Env[3].Name, common.EnvVarControllerInstanceID)
+ assert.Equal(t, container.Env[3].Value, ctx.controller.Config.InstanceID)
+ assert.Equal(t, container.Env[4].Name, common.EnvVarGatewayServerPort)
+ assert.Equal(t, container.Env[4].Value, ctx.gateway.Spec.ProcessorPort)
+ }
+
+ newDeployment, err := controller.k8sClient.AppsV1().Deployments(deployment.Namespace).Create(deployment)
+ assert.Nil(t, err)
+ assert.NotNil(t, newDeployment)
+ assert.Equal(t, newDeployment.Labels[common.LabelOwnerName], ctx.gateway.Name)
+ assert.NotNil(t, newDeployment.Annotations[common.AnnotationResourceSpecHash])
+}
+
+func TestResource_CreateGatewayResource(t *testing.T) {
+ tests := []struct {
+ name string
+ updateFunc func(ctx *gatewayContext)
+ testFunc func(controller *Controller, ctx *gatewayContext, t *testing.T)
+ }{
+ {
+ name: "gateway with deployment and service",
+ updateFunc: func(ctx *gatewayContext) {},
+ testFunc: func(controller *Controller, ctx *gatewayContext, t *testing.T) {
+ deploymentMetadata := ctx.gateway.Status.Resources.Deployment
+ serviceMetadata := ctx.gateway.Status.Resources.Service
+ deployment, err := controller.k8sClient.AppsV1().Deployments(deploymentMetadata.Namespace).Get(deploymentMetadata.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ service, err := controller.k8sClient.CoreV1().Services(serviceMetadata.Namespace).Get(serviceMetadata.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+ },
+ },
+ {
+ name: "gateway with zero deployment replica",
+ updateFunc: func(ctx *gatewayContext) {
+ ctx.gateway.Spec.Replica = 0
+ },
+ testFunc: func(controller *Controller, ctx *gatewayContext, t *testing.T) {
+ deploymentMetadata := ctx.gateway.Status.Resources.Deployment
+ deployment, err := controller.k8sClient.AppsV1().Deployments(deploymentMetadata.Namespace).Get(deploymentMetadata.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ assert.Equal(t, *deployment.Spec.Replicas, int32(1))
+ },
+ },
+ {
+ name: "gateway with empty service template",
+ updateFunc: func(ctx *gatewayContext) {
+ ctx.gateway.Spec.Service = nil
+ },
+ testFunc: func(controller *Controller, ctx *gatewayContext, t *testing.T) {
+ deploymentMetadata := ctx.gateway.Status.Resources.Deployment
+ deployment, err := controller.k8sClient.AppsV1().Deployments(deploymentMetadata.Namespace).Get(deploymentMetadata.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ assert.Nil(t, ctx.gateway.Status.Resources.Service)
+ },
+ },
+ {
+ name: "gateway with resources in different namespaces",
+ updateFunc: func(ctx *gatewayContext) {
+ ctx.gateway.Spec.Template.Namespace = "new-namespace"
+ ctx.gateway.Spec.Service.Namespace = "new-namespace"
+ },
+ testFunc: func(controller *Controller, ctx *gatewayContext, t *testing.T) {
+ deploymentMetadata := ctx.gateway.Status.Resources.Deployment
+ serviceMetadata := ctx.gateway.Status.Resources.Service
+ deployment, err := controller.k8sClient.AppsV1().Deployments(deploymentMetadata.Namespace).Get(deploymentMetadata.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ service, err := controller.k8sClient.CoreV1().Services(serviceMetadata.Namespace).Get(serviceMetadata.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+ assert.NotEqual(t, ctx.gateway.Namespace, deployment.Namespace)
+ assert.NotEqual(t, ctx.gateway.Namespace, service.Namespace)
+ },
+ },
+ {
+ name: "gateway with resources with empty names and namespaces",
+ updateFunc: func(ctx *gatewayContext) {
+ ctx.gateway.Spec.Template.Name = ""
+ ctx.gateway.Spec.Service.Name = ""
+ },
+ testFunc: func(controller *Controller, ctx *gatewayContext, t *testing.T) {
+ deploymentMetadata := ctx.gateway.Status.Resources.Deployment
+ serviceMetadata := ctx.gateway.Status.Resources.Service
+ deployment, err := controller.k8sClient.AppsV1().Deployments(deploymentMetadata.Namespace).Get(deploymentMetadata.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ service, err := controller.k8sClient.CoreV1().Services(serviceMetadata.Namespace).Get(serviceMetadata.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+ assert.Equal(t, ctx.gateway.Name, deployment.Name)
+ assert.Equal(t, ctx.gateway.Name, service.Name)
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ controller := newController()
+ ctx := newGatewayContext(gatewayObj.DeepCopy(), controller)
+ test.updateFunc(ctx)
+ err := ctx.createGatewayResources()
+ assert.Nil(t, err)
+ test.testFunc(controller, ctx, t)
+ })
+ }
+}
+
+func TestResource_UpdateGatewayResource(t *testing.T) {
+ controller := newController()
+ ctx := newGatewayContext(gatewayObj.DeepCopy(), controller)
+ err := ctx.createGatewayResources()
+ assert.Nil(t, err)
+
+ tests := []struct {
+ name string
+ updateFunc func()
+ testFunc func(t *testing.T, oldMetadata *v1alpha1.GatewayResource)
+ }{
+ {
+ name: "update deployment resource on gateway template change",
+ updateFunc: func() {
+ ctx.gateway.Spec.Template.Spec.Containers[0].ImagePullPolicy = corev1.PullIfNotPresent
+ ctx.gateway.Spec.Service.Spec.Type = corev1.ServiceTypeNodePort
+ },
+ testFunc: func(t *testing.T, oldMetadata *v1alpha1.GatewayResource) {
+ currentMetadata := ctx.gateway.Status.Resources
+ deployment, err := controller.k8sClient.AppsV1().Deployments(currentMetadata.Deployment.Namespace).Get(currentMetadata.Deployment.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ assert.NotEqual(t, deployment.Annotations[common.AnnotationResourceSpecHash], oldMetadata.Deployment.Annotations[common.AnnotationResourceSpecHash])
+ service, err := controller.k8sClient.CoreV1().Services(currentMetadata.Service.Namespace).Get(currentMetadata.Service.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+ assert.NotEqual(t, service.Annotations[common.AnnotationResourceSpecHash], oldMetadata.Service.Annotations[common.AnnotationResourceSpecHash])
+ },
+ },
+ {
+ name: "delete service resource if gateway service spec is removed",
+ updateFunc: func() {
+ ctx.gateway.Spec.Service = nil
+ },
+ testFunc: func(t *testing.T, oldMetadata *v1alpha1.GatewayResource) {
+ currentMetadata := ctx.gateway.Status.Resources
+ deployment, err := controller.k8sClient.AppsV1().Deployments(currentMetadata.Deployment.Namespace).Get(currentMetadata.Deployment.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ assert.Equal(t, deployment.Annotations[common.AnnotationResourceSpecHash], oldMetadata.Deployment.Annotations[common.AnnotationResourceSpecHash])
+ assert.Nil(t, ctx.gateway.Status.Resources.Service)
+ service, err := controller.k8sClient.CoreV1().Services(oldMetadata.Service.Namespace).Get(oldMetadata.Service.Name, metav1.GetOptions{})
+ assert.NotNil(t, err)
+ assert.Equal(t, apierror.IsNotFound(err), true)
+ assert.Nil(t, service)
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ metadata := ctx.gateway.Status.Resources.DeepCopy()
+ test.updateFunc()
+ err := ctx.updateGatewayResources()
+ assert.Nil(t, err)
+ test.testFunc(t, metadata)
+ })
+ }
+}
diff --git a/controllers/gateway/state.go b/controllers/gateway/state.go
deleted file mode 100644
index 7c61b47580..0000000000
--- a/controllers/gateway/state.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package gateway
-
-import (
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- gwclient "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned"
- "github.com/sirupsen/logrus"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/util/wait"
-)
-
-// PersistUpdates of the gateway resource
-func PersistUpdates(client gwclient.Interface, gw *v1alpha1.Gateway, log *logrus.Logger) (*v1alpha1.Gateway, error) {
- gatewayClient := client.ArgoprojV1alpha1().Gateways(gw.ObjectMeta.Namespace)
-
- // in case persist update fails
- oldgw := gw.DeepCopy()
-
- gw, err := gatewayClient.Update(gw)
- if err != nil {
- log.WithError(err).Warn("error updating gateway")
- if errors.IsConflict(err) {
- return oldgw, err
- }
- log.Info("re-applying updates on latest version and retrying update")
- err = ReapplyUpdates(client, gw)
- if err != nil {
- log.WithError(err).Error("failed to re-apply update")
- return oldgw, err
- }
- }
- log.WithField(common.LabelPhase, string(gw.Status.Phase)).Info("gateway state updated successfully")
- return gw, nil
-}
-
-// ReapplyUpdates to gateway resource
-func ReapplyUpdates(client gwclient.Interface, gw *v1alpha1.Gateway) error {
- return wait.ExponentialBackoff(common.DefaultRetry, func() (bool, error) {
- gatewayClient := client.ArgoprojV1alpha1().Gateways(gw.Namespace)
- g, err := gatewayClient.Update(gw)
- if err != nil {
- if !common.IsRetryableKubeAPIError(err) {
- return false, err
- }
- return false, nil
- }
- gw = g
- return true, nil
- })
-}
diff --git a/controllers/gateway/state_test.go b/controllers/gateway/state_test.go
deleted file mode 100644
index 9923e0b525..0000000000
--- a/controllers/gateway/state_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package gateway
-
-import (
- "github.com/argoproj/argo-events/common"
- "testing"
-
- "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned/fake"
- "github.com/smartystreets/goconvey/convey"
-)
-
-func TestPersistUpdates(t *testing.T) {
- convey.Convey("Given a gateway resource", t, func() {
- namespace := "argo-events"
- client := fake.NewSimpleClientset()
- logger := common.NewArgoEventsLogger()
- gw, err := getGateway()
- convey.So(err, convey.ShouldBeNil)
-
- convey.Convey("Create the gateway", func() {
- gw, err = client.ArgoprojV1alpha1().Gateways(namespace).Create(gw)
- convey.So(err, convey.ShouldBeNil)
- convey.So(gw, convey.ShouldNotBeNil)
-
- gw.ObjectMeta.Labels = map[string]string{
- "default": "default",
- }
-
- convey.Convey("Update the gateway", func() {
- updatedGw, err := PersistUpdates(client, gw, logger)
- convey.So(err, convey.ShouldBeNil)
- convey.So(updatedGw, convey.ShouldNotEqual, gw)
- convey.So(updatedGw.Labels, convey.ShouldResemble, gw.Labels)
-
- updatedGw.Labels["new"] = "new"
-
- convey.Convey("Reapply the gateway", func() {
- err := ReapplyUpdates(client, updatedGw)
- convey.So(err, convey.ShouldBeNil)
- convey.So(len(updatedGw.Labels), convey.ShouldEqual, 2)
- })
- })
- })
- })
-}
diff --git a/controllers/gateway/validate.go b/controllers/gateway/validate.go
index 73860ff8a2..b1dbb51a4c 100644
--- a/controllers/gateway/validate.go
+++ b/controllers/gateway/validate.go
@@ -17,50 +17,50 @@ limitations under the License.
package gateway
import (
- "fmt"
apicommon "github.com/argoproj/argo-events/pkg/apis/common"
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ "github.com/pkg/errors"
)
// Validate validates the gateway resource.
-// Exporting this function so that external APIs can use this to validate gateway resource.
-func Validate(gw *v1alpha1.Gateway) error {
- if gw.Spec.Template == nil {
- return fmt.Errorf("gateway pod template is not specified")
+func Validate(gatewayObj *v1alpha1.Gateway) error {
+ if gatewayObj.Spec.Template == nil {
+ return errors.New("gateway pod template is not specified")
}
- if gw.Spec.Type == "" {
- return fmt.Errorf("gateway type is not specified")
+ if gatewayObj.Spec.Type == "" {
+ return errors.New("gateway type is not specified")
}
- if gw.Spec.EventSource == "" {
- return fmt.Errorf("event source for the gateway is not specified")
+ if gatewayObj.Spec.EventSourceRef == nil {
+ return errors.New("event source for the gateway is not specified")
}
- if gw.Spec.ProcessorPort == "" {
- return fmt.Errorf("gateway processor port is not specified")
+
+ if gatewayObj.Spec.ProcessorPort == "" {
+ return errors.New("gateway processor port is not specified")
}
- switch gw.Spec.EventProtocol.Type {
+ switch gatewayObj.Spec.EventProtocol.Type {
case apicommon.HTTP:
- if gw.Spec.Watchers == nil || (gw.Spec.Watchers.Gateways == nil && gw.Spec.Watchers.Sensors == nil) {
- return fmt.Errorf("no associated watchers with gateway")
+ if gatewayObj.Spec.Watchers == nil || (gatewayObj.Spec.Watchers.Gateways == nil && gatewayObj.Spec.Watchers.Sensors == nil) {
+ return errors.New("no associated watchers with gateway")
}
- if gw.Spec.EventProtocol.Http.Port == "" {
- return fmt.Errorf("http server port is not defined")
+ if gatewayObj.Spec.EventProtocol.Http.Port == "" {
+ return errors.New("http server port is not defined")
}
case apicommon.NATS:
- if gw.Spec.EventProtocol.Nats.URL == "" {
- return fmt.Errorf("nats url is not defined")
+ if gatewayObj.Spec.EventProtocol.Nats.URL == "" {
+ return errors.New("nats url is not defined")
}
- if gw.Spec.EventProtocol.Nats.Type == "" {
- return fmt.Errorf("nats service type is not defined")
+ if gatewayObj.Spec.EventProtocol.Nats.Type == "" {
+ return errors.New("nats service type is not defined")
}
- if gw.Spec.EventProtocol.Nats.Type == apicommon.Streaming && gw.Spec.EventProtocol.Nats.ClientId == "" {
- return fmt.Errorf("client id must be specified when using nats streaming")
+ if gatewayObj.Spec.EventProtocol.Nats.Type == apicommon.Streaming && gatewayObj.Spec.EventProtocol.Nats.ClientId == "" {
+ return errors.New("client id must be specified when using nats streaming")
}
- if gw.Spec.EventProtocol.Nats.Type == apicommon.Streaming && gw.Spec.EventProtocol.Nats.ClusterId == "" {
- return fmt.Errorf("cluster id must be specified when using nats streaming")
+ if gatewayObj.Spec.EventProtocol.Nats.Type == apicommon.Streaming && gatewayObj.Spec.EventProtocol.Nats.ClusterId == "" {
+ return errors.New("cluster id must be specified when using nats streaming")
}
default:
- return fmt.Errorf("unknown gateway type")
+ return errors.New("unknown gateway type")
}
return nil
}
diff --git a/controllers/gateway/validate_test.go b/controllers/gateway/validate_test.go
index 47631c1dcb..a537b87d3e 100644
--- a/controllers/gateway/validate_test.go
+++ b/controllers/gateway/validate_test.go
@@ -18,28 +18,25 @@ package gateway
import (
"fmt"
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- "github.com/ghodss/yaml"
"io/ioutil"
"testing"
- "github.com/smartystreets/goconvey/convey"
+ "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
)
func TestValidate(t *testing.T) {
dir := "../../examples/gateways"
- convey.Convey("Validate list of gateways", t, func() {
- files, err := ioutil.ReadDir(dir)
- convey.So(err, convey.ShouldBeNil)
- for _, file := range files {
- fmt.Println("filename: ", file.Name())
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", dir, file.Name()))
- convey.So(err, convey.ShouldBeNil)
- var gateway *v1alpha1.Gateway
- err = yaml.Unmarshal([]byte(content), &gateway)
- convey.So(err, convey.ShouldBeNil)
- err = Validate(gateway)
- convey.So(err, convey.ShouldBeNil)
- }
- })
+ files, err := ioutil.ReadDir(dir)
+ assert.Nil(t, err)
+ for _, file := range files {
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", dir, file.Name()))
+ assert.Nil(t, err)
+ var gateway *v1alpha1.Gateway
+ err = yaml.Unmarshal([]byte(content), &gateway)
+ assert.Nil(t, err)
+ err = Validate(gateway)
+ assert.Nil(t, err)
+ }
}
diff --git a/cmd/controllers/sensor/main.go b/controllers/sensor/cmd/main.go
similarity index 78%
rename from cmd/controllers/sensor/main.go
rename to controllers/sensor/cmd/main.go
index 663897c9a1..aaaa9ac867 100644
--- a/cmd/controllers/sensor/main.go
+++ b/controllers/sensor/cmd/main.go
@@ -33,24 +33,24 @@ func main() {
}
// sensor-controller configuration
- configMap, ok := os.LookupEnv(common.EnvVarSensorControllerConfigMap)
+ configMap, ok := os.LookupEnv(common.EnvVarControllerConfigMap)
if !ok {
- configMap = common.DefaultConfigMapName(common.LabelSensorControllerName)
+ panic("controller configmap is not provided")
}
- namespace, ok := os.LookupEnv(common.SensorNamespace)
+ namespace, ok := os.LookupEnv(common.EnvVarNamespace)
if !ok {
namespace = common.DefaultControllerNamespace
}
// create a new sensor controller
- controller := sensor.NewSensorController(restConfig, configMap, namespace)
+ controller := sensor.NewController(restConfig, configMap, namespace)
// watch updates to sensor controller configuration
err = controller.ResyncConfig(namespace)
if err != nil {
panic(err)
}
- go controller.Run(context.Background(), 1, 1)
+ go controller.Run(context.Background(), 1)
select {}
}
diff --git a/gateways/community/slack/config_test.go b/controllers/sensor/common.go
similarity index 50%
rename from gateways/community/slack/config_test.go
rename to controllers/sensor/common.go
index b07e62fdd7..4867b4ef3c 100644
--- a/gateways/community/slack/config_test.go
+++ b/controllers/sensor/common.go
@@ -14,29 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package slack
+package sensor
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-var es = `
-hook:
- endpoint: "/"
- port: "8080"
- url: "testurl"
-token:
- name: fake-token
- key: fake
-`
+import "github.com/argoproj/argo-events/pkg/apis/sensor"
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a slack event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*slackEventSource)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
+// Labels
+const (
+	// LabelControllerInstanceID is the label that separates applications managed by multiple running controllers.
+ LabelControllerInstanceID = sensor.FullName + "/sensor-controller-instanceid"
+ // LabelPhase is a label applied to sensors to indicate the current phase of the sensor (for filtering purposes)
+ LabelPhase = sensor.FullName + "/phase"
+ // LabelComplete is the label to mark sensors as complete
+ LabelComplete = sensor.FullName + "/complete"
+)
diff --git a/controllers/sensor/config.go b/controllers/sensor/config.go
index 2515e6c838..d0f9f23732 100644
--- a/controllers/sensor/config.go
+++ b/controllers/sensor/config.go
@@ -19,9 +19,10 @@ package sensor
import (
"context"
"fmt"
- "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/common"
"github.com/ghodss/yaml"
+ "github.com/pkg/errors"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -32,49 +33,49 @@ import (
)
// watchControllerConfigMap watches updates to sensor controller configmap
-func (c *SensorController) watchControllerConfigMap(ctx context.Context) (cache.Controller, error) {
- log.Info("watching sensor-controller config map updates")
- source := c.newControllerConfigMapWatch()
- _, controller := cache.NewInformer(
+func (controller *Controller) watchControllerConfigMap(ctx context.Context) (cache.Controller, error) {
+ log.Info("watching controller config map updates")
+ source := controller.newControllerConfigMapWatch()
+ _, ctrl := cache.NewInformer(
source,
&corev1.ConfigMap{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if cm, ok := obj.(*corev1.ConfigMap); ok {
- log.Info("detected EventSource update. updating the sensor-controller config.")
- err := c.updateConfig(cm)
+ log.Info("detected configuration update. updating the controller configuration")
+ err := controller.updateConfig(cm)
if err != nil {
- log.Errorf("update of config failed due to: %v", err)
+ log.Errorf("update of controller configuration failed due to: %v", err)
}
}
},
UpdateFunc: func(old, new interface{}) {
if newCm, ok := new.(*corev1.ConfigMap); ok {
- log.Info("detected EventSource update. updating the sensor-controller config.")
- err := c.updateConfig(newCm)
+ log.Info("detected configuration update. updating the controller configuration")
+ err := controller.updateConfig(newCm)
if err != nil {
- log.Errorf("update of config failed due to: %v", err)
+ log.Errorf("update of controller configuration failed due to: %v", err)
}
}
},
})
- go controller.Run(ctx.Done())
- return controller, nil
+ go ctrl.Run(ctx.Done())
+ return ctrl, nil
}
// newControllerConfigMapWatch returns a configmap watcher
-func (c *SensorController) newControllerConfigMapWatch() *cache.ListWatch {
- x := c.kubeClientset.CoreV1().RESTClient()
+func (controller *Controller) newControllerConfigMapWatch() *cache.ListWatch {
+ x := controller.k8sClient.CoreV1().RESTClient()
resource := "configmaps"
- name := c.ConfigMap
+ name := controller.ConfigMap
fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", name))
listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector.String()
req := x.Get().
- Namespace(c.Namespace).
+ Namespace(controller.Namespace).
Resource(resource).
VersionedParams(&options, metav1.ParameterCodec)
return req.Do().Get()
@@ -83,7 +84,7 @@ func (c *SensorController) newControllerConfigMapWatch() *cache.ListWatch {
options.Watch = true
options.FieldSelector = fieldSelector.String()
req := x.Get().
- Namespace(c.Namespace).
+ Namespace(controller.Namespace).
Resource(resource).
VersionedParams(&options, metav1.ParameterCodec)
return req.Watch()
@@ -91,26 +92,26 @@ func (c *SensorController) newControllerConfigMapWatch() *cache.ListWatch {
return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}
-// ResyncConfig reloads the sensor-controller config from the configmap
-func (c *SensorController) ResyncConfig(namespace string) error {
- cmClient := c.kubeClientset.CoreV1().ConfigMaps(namespace)
- cm, err := cmClient.Get(c.ConfigMap, metav1.GetOptions{})
+// ResyncConfig reloads the controller config from the configmap
+func (controller *Controller) ResyncConfig(namespace string) error {
+ cmClient := controller.k8sClient.CoreV1().ConfigMaps(namespace)
+ cm, err := cmClient.Get(controller.ConfigMap, metav1.GetOptions{})
if err != nil {
return err
}
- return c.updateConfig(cm)
+ return controller.updateConfig(cm)
}
-func (c *SensorController) updateConfig(cm *corev1.ConfigMap) error {
- configStr, ok := cm.Data[common.SensorControllerConfigMapKey]
+func (controller *Controller) updateConfig(cm *corev1.ConfigMap) error {
+ configStr, ok := cm.Data[common.ControllerConfigMapKey]
if !ok {
- return fmt.Errorf("configMap '%s' does not have key '%s'", c.ConfigMap, common.SensorControllerConfigMapKey)
+ return errors.Errorf("configMap '%s' does not have key '%s'", controller.ConfigMap, common.ControllerConfigMapKey)
}
- var config SensorControllerConfig
+ var config ControllerConfig
err := yaml.Unmarshal([]byte(configStr), &config)
if err != nil {
return err
}
- c.Config = config
+ controller.Config = config
return nil
}
diff --git a/controllers/sensor/config_test.go b/controllers/sensor/config_test.go
index 87ea54b68a..901c286015 100644
--- a/controllers/sensor/config_test.go
+++ b/controllers/sensor/config_test.go
@@ -20,52 +20,26 @@ import (
"testing"
"github.com/argoproj/argo-events/common"
- "github.com/smartystreets/goconvey/convey"
+ "github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestSensorControllerConfigWatch(t *testing.T) {
- sc := getSensorController()
-
- convey.Convey("Given a sensor", t, func() {
- convey.Convey("Create a new watch and make sure watcher is not nil", func() {
- watcher := sc.newControllerConfigMapWatch()
- convey.So(watcher, convey.ShouldNotBeNil)
- })
- })
-
- convey.Convey("Given a sensor, resync config", t, func() {
- convey.Convey("Update a sensor configmap with new instance id and remove namespace", func() {
- cmObj := &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: common.DefaultControllerNamespace,
- Name: sc.ConfigMap,
- },
- Data: map[string]string{
- common.SensorControllerConfigMapKey: `instanceID: fake-instance-id`,
- },
- }
- cm, err := sc.kubeClientset.CoreV1().ConfigMaps(sc.Namespace).Create(cmObj)
- convey.Convey("Make sure no error occurs", func() {
- convey.So(err, convey.ShouldBeNil)
-
- convey.Convey("Updated sensor configmap must be non-nil", func() {
- convey.So(cm, convey.ShouldNotBeNil)
-
- convey.Convey("Resync the sensor configuration", func() {
- err := sc.ResyncConfig(cmObj.Namespace)
- convey.Convey("No error should occur while resyncing sensor configuration", func() {
- convey.So(err, convey.ShouldBeNil)
-
- convey.Convey("The updated instance id must be fake-instance-id", func() {
- convey.So(sc.Config.InstanceID, convey.ShouldEqual, "fake-instance-id")
- convey.So(sc.Config.Namespace, convey.ShouldBeEmpty)
- })
- })
- })
- })
- })
- })
- })
+ sensorController := getController()
+ configmap := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: common.DefaultControllerNamespace,
+ Name: sensorController.ConfigMap,
+ },
+ Data: map[string]string{
+ common.ControllerConfigMapKey: `instanceID: fake-instance-id`,
+ },
+ }
+ cm, err := sensorController.k8sClient.CoreV1().ConfigMaps(sensorController.Namespace).Create(configmap)
+ assert.Nil(t, err)
+ assert.NotNil(t, cm)
+ err = sensorController.ResyncConfig(sensorController.Namespace)
+ assert.Nil(t, err)
+ assert.Equal(t, sensorController.Config.InstanceID, "fake-instance-id")
}
diff --git a/controllers/sensor/controller.go b/controllers/sensor/controller.go
index edf99cbfd1..b2923d670e 100644
--- a/controllers/sensor/controller.go
+++ b/controllers/sensor/controller.go
@@ -20,93 +20,85 @@ import (
"context"
"errors"
"fmt"
- "github.com/argoproj/argo-events/pkg/apis/sensor"
- "github.com/sirupsen/logrus"
"time"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
+ base "github.com/argoproj/argo-events"
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/pkg/apis/sensor"
+ "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
+ clientset "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned"
+ "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/client-go/informers"
- informersv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
-
- base "github.com/argoproj/argo-events"
- "github.com/argoproj/argo-events/common"
- ccommon "github.com/argoproj/argo-events/controllers/common"
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
- clientset "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned"
)
// informer constants
const (
- sensorResyncPeriod = 20 * time.Minute
- sensorResourceResyncPeriod = 30 * time.Minute
- rateLimiterBaseDelay = 5 * time.Second
- rateLimiterMaxDelay = 1000 * time.Second
+ sensorResyncPeriod = 20 * time.Minute
+ rateLimiterBaseDelay = 5 * time.Second
+ rateLimiterMaxDelay = 1000 * time.Second
)
-// SensorControllerConfig contain the configuration settings for the sensor-controller
-type SensorControllerConfig struct {
- // InstanceID is a label selector to limit the sensor-controller's watch of sensor jobs to a specific instance.
- // If omitted, the sensor-controller watches sensors that *are not* labeled with an instance id.
+// ControllerConfig contains the configuration settings for the controller
+type ControllerConfig struct {
+	// InstanceID is a label selector to limit the controller's watch of sensor jobs to a specific instance.
+ // If omitted, the controller watches sensors that *are not* labeled with an instance id.
InstanceID string
-
- // Namespace is a label selector filter to limit sensor-controller's watch to specific namespace
+	// Namespace is a label selector filter to limit the controller's watch to a specific namespace
Namespace string
}
-// SensorController listens for new sensors and hands off handling of each sensor on the queue to the operator
-type SensorController struct {
- // EventSource is the name of the config map in which to derive configuration of the contoller
+// Controller listens for new sensors and hands off handling of each sensor on the queue to the operator
+type Controller struct {
+	// ConfigMap is the name of the config map from which the controller configuration is derived
ConfigMap string
- // Namespace for sensor controller
+ // Namespace for controller
Namespace string
- // Config is the sensor-controller's configuration
- Config SensorControllerConfig
- // log is the logger for a gateway
- log *logrus.Logger
-
- // kubernetes config and apis
- kubeConfig *rest.Config
- kubeClientset kubernetes.Interface
- sensorClientset clientset.Interface
-
- // sensor informer and queue
- podInformer informersv1.PodInformer
- svcInformer informersv1.ServiceInformer
- informer cache.SharedIndexInformer
- queue workqueue.RateLimitingInterface
+ // Config is the controller's configuration
+ Config ControllerConfig
+ // logger for the controller
+ logger *logrus.Logger
+ // kubeConfig is the rest K8s config
+ kubeConfig *rest.Config
+ // k8sClient is the Kubernetes client
+ k8sClient kubernetes.Interface
+ // sensorClient is the client for operations on the sensor custom resource
+ sensorClient clientset.Interface
+ // informer for sensor resource updates
+ informer cache.SharedIndexInformer
+ // queue to process watched sensor resources
+ queue workqueue.RateLimitingInterface
}
-// NewSensorController creates a new Controller
-func NewSensorController(rest *rest.Config, configMap, namespace string) *SensorController {
+// NewController creates a new Controller
+func NewController(rest *rest.Config, configMap, namespace string) *Controller {
rateLimiter := workqueue.NewItemExponentialFailureRateLimiter(rateLimiterBaseDelay, rateLimiterMaxDelay)
- return &SensorController{
- ConfigMap: configMap,
- Namespace: namespace,
- kubeConfig: rest,
- kubeClientset: kubernetes.NewForConfigOrDie(rest),
- sensorClientset: clientset.NewForConfigOrDie(rest),
- queue: workqueue.NewRateLimitingQueue(rateLimiter),
- log: common.NewArgoEventsLogger(),
+ return &Controller{
+ ConfigMap: configMap,
+ Namespace: namespace,
+ kubeConfig: rest,
+ k8sClient: kubernetes.NewForConfigOrDie(rest),
+ sensorClient: clientset.NewForConfigOrDie(rest),
+ queue: workqueue.NewRateLimitingQueue(rateLimiter),
+ logger: common.NewArgoEventsLogger(),
}
}
-func (c *SensorController) processNextItem() bool {
+// processNextItem processes the sensor resource object on the queue
+func (controller *Controller) processNextItem() bool {
// Wait until there is a new item in the queue
- key, quit := c.queue.Get()
+ key, quit := controller.queue.Get()
if quit {
return false
}
- defer c.queue.Done(key)
+ defer controller.queue.Done(key)
- obj, exists, err := c.informer.GetIndexer().GetByKey(key.(string))
+ obj, exists, err := controller.informer.GetIndexer().GetByKey(key.(string))
if err != nil {
- c.log.WithField(common.LabelSensorName, key.(string)).WithError(err).Warn("failed to get sensor from informer index")
+ controller.logger.WithField(common.LabelSensorName, key.(string)).WithError(err).Warnln("failed to get sensor from informer index")
return true
}
@@ -117,21 +109,21 @@ func (c *SensorController) processNextItem() bool {
s, ok := obj.(*v1alpha1.Sensor)
if !ok {
- c.log.WithField(common.LabelSensorName, key.(string)).WithError(err).Warn("key in index is not a sensor")
+ controller.logger.WithField(common.LabelSensorName, key.(string)).WithError(err).Warnln("key in index is not a sensor")
return true
}
- ctx := newSensorOperationCtx(s, c)
+ ctx := newSensorContext(s, controller)
err = ctx.operate()
if err != nil {
- if err := common.GenerateK8sEvent(c.kubeClientset,
+ if err := common.GenerateK8sEvent(controller.k8sClient,
fmt.Sprintf("failed to operate on sensor %s", s.Name),
common.EscalationEventType,
"sensor operation failed",
s.Name,
s.Namespace,
- c.Config.InstanceID,
+ controller.Config.InstanceID,
sensor.Kind,
map[string]string{
common.LabelSensorName: s.Name,
@@ -139,96 +131,73 @@ func (c *SensorController) processNextItem() bool {
common.LabelOperation: "controller_operation",
},
); err != nil {
- ctx.log.WithError(err).Error("failed to create K8s event to escalate sensor operation failure")
+ ctx.logger.WithError(err).Errorln("failed to create K8s event to escalate sensor operation failure")
}
}
- err = c.handleErr(err, key)
+ err = controller.handleErr(err, key)
if err != nil {
- ctx.log.WithError(err).Error("sensor controller is unable to handle the error")
+ ctx.logger.WithError(err).Errorln("controller is unable to handle the error")
}
return true
}
// handleErr checks if an error happened and make sure we will retry later
// returns an error if unable to handle the error
-func (c *SensorController) handleErr(err error, key interface{}) error {
+func (controller *Controller) handleErr(err error, key interface{}) error {
if err == nil {
// Forget about the #AddRateLimited history of key on every successful sync
// Ensure future updates for this key are not delayed because of outdated error history
- c.queue.Forget(key)
+ controller.queue.Forget(key)
return nil
}
// due to the base delay of 5ms of the DefaultControllerRateLimiter
- // requeues will happen very quickly even after a sensor pod goes down
- // we want to give the sensor pod a chance to come back up so we give a genorous number of retries
- if c.queue.NumRequeues(key) < 20 {
+ // re-queues will happen very quickly even after a sensor pod goes down
+ // we want to give the sensor pod a chance to come back up so we give a generous number of retries
+ if controller.queue.NumRequeues(key) < 20 {
// Re-enqueue the key rate limited. This key will be processed later again.
- c.queue.AddRateLimited(key)
+ controller.queue.AddRateLimited(key)
return nil
}
- return errors.New("exceeded max requeues")
+ return errors.New("exceeded max re-queues")
}
-// Run executes the sensor-controller
-func (c *SensorController) Run(ctx context.Context, ssThreads, eventThreads int) {
- defer c.queue.ShutDown()
+// Run executes the controller
+func (controller *Controller) Run(ctx context.Context, threads int) {
+ defer controller.queue.ShutDown()
- c.log.WithFields(
+ controller.logger.WithFields(
map[string]interface{}{
- common.LabelInstanceID: c.Config.InstanceID,
+ common.LabelInstanceID: controller.Config.InstanceID,
common.LabelVersion: base.GetVersion().Version,
- }).Info("starting sensor controller")
- _, err := c.watchControllerConfigMap(ctx)
- if err != nil {
- c.log.WithError(err).Error("failed to register watch for sensor controller config map")
- return
- }
+ }).Infoln("starting the controller...")
- c.informer = c.newSensorInformer()
- go c.informer.Run(ctx.Done())
-
- if !cache.WaitForCacheSync(ctx.Done(), c.informer.HasSynced) {
- c.log.Panic("timed out waiting for the caches to sync for sensors")
+ _, err := controller.watchControllerConfigMap(ctx)
+ if err != nil {
+ controller.logger.WithError(err).Error("failed to register watch for controller config map")
return
}
- listOptionsFunc := func(options *metav1.ListOptions) {
- labelSelector := labels.NewSelector().Add(c.instanceIDReq())
- options.LabelSelector = labelSelector.String()
- }
- factory := ccommon.ArgoEventInformerFactory{
- OwnerGroupVersionKind: v1alpha1.SchemaGroupVersionKind,
- OwnerInformer: c.informer,
- SharedInformerFactory: informers.NewFilteredSharedInformerFactory(c.kubeClientset, sensorResourceResyncPeriod, c.Config.Namespace, listOptionsFunc),
- Queue: c.queue,
- }
-
- c.podInformer = factory.NewPodInformer()
- go c.podInformer.Informer().Run(ctx.Done())
-
- if !cache.WaitForCacheSync(ctx.Done(), c.podInformer.Informer().HasSynced) {
- c.log.Panic("timed out waiting for the caches to sync for sensor pods")
- return
+ controller.informer, err = controller.newSensorInformer()
+ if err != nil {
+ controller.logger.WithError(err).Errorln("failed to create a new sensor controller")
}
+ go controller.informer.Run(ctx.Done())
- c.svcInformer = factory.NewServiceInformer()
- go c.svcInformer.Informer().Run(ctx.Done())
-
- if !cache.WaitForCacheSync(ctx.Done(), c.svcInformer.Informer().HasSynced) {
- c.log.Panic("timed out waiting for the caches to sync for sensor services")
+ if !cache.WaitForCacheSync(ctx.Done(), controller.informer.HasSynced) {
+ controller.logger.Panic("timed out waiting for the caches to sync for sensors")
return
}
- for i := 0; i < ssThreads; i++ {
- go wait.Until(c.runWorker, time.Second, ctx.Done())
+ for i := 0; i < threads; i++ {
+ go wait.Until(controller.runWorker, time.Second, ctx.Done())
}
<-ctx.Done()
}
-func (c *SensorController) runWorker() {
- for c.processNextItem() {
+func (controller *Controller) runWorker() {
+ for controller.processNextItem() {
}
}
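
For orientation, here is a minimal sketch of how the renamed Controller could be wired up from a command entrypoint, assuming an in-cluster REST config; the config-map name and namespace below are illustrative placeholders, and the real cmd wiring lives outside this diff.

package main

import (
    "context"

    sensorcontroller "github.com/argoproj/argo-events/controllers/sensor"
    "k8s.io/client-go/rest"
)

func main() {
    // Assumes the process runs in-cluster; a real entrypoint might read a
    // kubeconfig or resolve these values from environment variables instead.
    restConfig, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }

    // "sensor-controller-configmap" and "argo-events" are placeholder values.
    controller := sensorcontroller.NewController(restConfig, "sensor-controller-configmap", "argo-events")

    // Run blocks until the context is cancelled; a single worker thread here.
    controller.Run(context.Background(), 1)
}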
diff --git a/controllers/sensor/controller_test.go b/controllers/sensor/controller_test.go
index 6130abcb8c..94b00b6a98 100644
--- a/controllers/sensor/controller_test.go
+++ b/controllers/sensor/controller_test.go
@@ -19,18 +19,13 @@ package sensor
import (
"fmt"
"testing"
- "time"
"github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
fakesensor "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/fake"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
+ "github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/informers"
- "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
- "k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
@@ -39,84 +34,55 @@ var (
SensorControllerInstanceID = "argo-events"
)
-func getFakePodSharedIndexInformer(clientset kubernetes.Interface) cache.SharedIndexInformer {
- // NewListWatchFromClient doesn't work with fake client.
- // ref: https://github.com/kubernetes/client-go/issues/352
- return cache.NewSharedIndexInformer(&cache.ListWatch{
- ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
- return clientset.CoreV1().Pods("").List(options)
- },
- WatchFunc: clientset.CoreV1().Pods("").Watch,
- }, &corev1.Pod{}, 1*time.Second, cache.Indexers{})
-}
-
-func getSensorController() *SensorController {
+func getController() *Controller {
clientset := fake.NewSimpleClientset()
- done := make(chan struct{})
- informer := getFakePodSharedIndexInformer(clientset)
- go informer.Run(done)
- factory := informers.NewSharedInformerFactory(clientset, 0)
- podInformer := factory.Core().V1().Pods()
- go podInformer.Informer().Run(done)
- svcInformer := factory.Core().V1().Services()
- go svcInformer.Informer().Run(done)
- return &SensorController{
+ controller := &Controller{
ConfigMap: SensorControllerConfigmap,
Namespace: common.DefaultControllerNamespace,
- Config: SensorControllerConfig{
+ Config: ControllerConfig{
Namespace: common.DefaultControllerNamespace,
InstanceID: SensorControllerInstanceID,
},
- kubeClientset: clientset,
- sensorClientset: fakesensor.NewSimpleClientset(),
- podInformer: podInformer,
- svcInformer: svcInformer,
- informer: informer,
- queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
- log: common.NewArgoEventsLogger(),
+ k8sClient: clientset,
+ sensorClient: fakesensor.NewSimpleClientset(),
+ queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
+ logger: common.NewArgoEventsLogger(),
}
+ informer, err := controller.newSensorInformer()
+ if err != nil {
+ panic(err)
+ }
+ controller.informer = informer
+ return controller
}
-func TestGatewayController(t *testing.T) {
- convey.Convey("Given a sensor controller, process queue items", t, func() {
- controller := getSensorController()
-
- convey.Convey("Create a resource queue, add new item and process it", func() {
- controller.queue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
- controller.informer = controller.newSensorInformer()
- controller.queue.Add("hi")
- res := controller.processNextItem()
-
- convey.Convey("Item from queue must be successfully processed", func() {
- convey.So(res, convey.ShouldBeTrue)
- })
-
- convey.Convey("Shutdown queue and make sure queue does not process next item", func() {
- controller.queue.ShutDown()
- res := controller.processNextItem()
- convey.So(res, convey.ShouldBeFalse)
- })
- })
+func TestController_ProcessNextItem(t *testing.T) {
+ controller := getController()
+ err := controller.informer.GetIndexer().Add(&v1alpha1.Sensor{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fake-sensor",
+ Namespace: "fake-namespace",
+ },
+ Spec: v1alpha1.SensorSpec{},
})
+ assert.Nil(t, err)
+ controller.queue.Add("fake-sensor")
+ res := controller.processNextItem()
+ assert.Equal(t, res, true)
+ controller.queue.ShutDown()
+ res = controller.processNextItem()
+ assert.Equal(t, res, false)
+}
- convey.Convey("Given a sensor controller, handle errors in queue", t, func() {
- controller := getSensorController()
- convey.Convey("Create a resource queue and add an item", func() {
- controller.queue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
- controller.queue.Add("hi")
- convey.Convey("Handle an nil error", func() {
- err := controller.handleErr(nil, "hi")
- convey.So(err, convey.ShouldBeNil)
- })
- convey.Convey("Exceed max requeues", func() {
- controller.queue.Add("bye")
- var err error
- for i := 0; i < 21; i++ {
- err = controller.handleErr(fmt.Errorf("real error"), "bye")
- }
- convey.So(err, convey.ShouldNotBeNil)
- convey.So(err.Error(), convey.ShouldEqual, "exceeded max requeues")
- })
- })
- })
+func TestController_HandleErr(t *testing.T) {
+ controller := getController()
+ controller.queue.Add("hi")
+ err := controller.handleErr(nil, "hi")
+ assert.Nil(t, err)
+ controller.queue.Add("bye")
+ for i := 0; i < 21; i++ {
+ err = controller.handleErr(fmt.Errorf("real error"), "bye")
+ }
+ assert.NotNil(t, err)
+ assert.Equal(t, err.Error(), "exceeded max re-queues")
}
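
As a side note on the retry budget exercised above: the controller's queue is built on workqueue.NewItemExponentialFailureRateLimiter with a 5s base delay and a 1000s cap, so re-queues for a failing key back off exponentially and handleErr gives up after 20 attempts. A small standalone sketch of that behaviour:

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/workqueue"
)

func main() {
    // Same bounds as rateLimiterBaseDelay / rateLimiterMaxDelay in controller.go.
    limiter := workqueue.NewItemExponentialFailureRateLimiter(5*time.Second, 1000*time.Second)

    // Each failure for the same key doubles the delay: 5s, 10s, 20s, ... capped at 1000s.
    for i := 0; i < 10; i++ {
        delay := limiter.When("my-sensor")
        fmt.Printf("requeue %d after %v (failures so far: %d)\n", i+1, delay, limiter.NumRequeues("my-sensor"))
    }

    // Forget resets the failure history, mirroring what handleErr does on a successful sync.
    limiter.Forget("my-sensor")
}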
diff --git a/controllers/sensor/informer.go b/controllers/sensor/informer.go
index 7965c6bb74..8bcfbb31a7 100644
--- a/controllers/sensor/informer.go
+++ b/controllers/sensor/informer.go
@@ -17,49 +17,43 @@ limitations under the License.
package sensor
import (
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
+ sensorinformers "github.com/argoproj/argo-events/pkg/client/sensor/informers/externalversions"
+ "github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
- "k8s.io/client-go/tools/cache"
-
- sensorinformers "github.com/argoproj/argo-events/pkg/client/sensor/informers/externalversions"
"k8s.io/apimachinery/pkg/selection"
+ "k8s.io/client-go/tools/cache"
)
-func (c *SensorController) instanceIDReq() labels.Requirement {
+func (controller *Controller) instanceIDReq() (*labels.Requirement, error) {
var instanceIDReq *labels.Requirement
var err error
- if c.Config.InstanceID == "" {
- panic("controller instance id must be specified")
+ if controller.Config.InstanceID == "" {
+ return nil, errors.New("controller instance id must be specified")
}
- instanceIDReq, err = labels.NewRequirement(common.LabelKeySensorControllerInstanceID, selection.Equals, []string{c.Config.InstanceID})
+ instanceIDReq, err = labels.NewRequirement(LabelControllerInstanceID, selection.Equals, []string{controller.Config.InstanceID})
if err != nil {
panic(err)
}
- return *instanceIDReq
+ return instanceIDReq, nil
}
-func (c *SensorController) versionReq() labels.Requirement {
- versionReq, err := labels.NewRequirement(common.LabelArgoEventsSensorVersion, selection.Equals, []string{v1alpha1.ArgoEventsSensorVersion})
+// The sensor informer adds new sensors to the controller's queue based on Add, Update, and Delete event handlers for the sensor resources
+func (controller *Controller) newSensorInformer() (cache.SharedIndexInformer, error) {
+ labelSelector, err := controller.instanceIDReq()
if err != nil {
- panic(err)
+ return nil, err
}
- return *versionReq
-}
-// The sensor informer adds new Sensors to the sensor-controller's queue based on Add, Update, and Delete Event Handlers for the Sensor Resources
-func (c *SensorController) newSensorInformer() cache.SharedIndexInformer {
- sensorInformerFactory := sensorinformers.NewFilteredSharedInformerFactory(
- c.sensorClientset,
+ sensorInformerFactory := sensorinformers.NewSharedInformerFactoryWithOptions(
+ controller.sensorClient,
sensorResyncPeriod,
- c.Config.Namespace,
- func(options *metav1.ListOptions) {
+ sensorinformers.WithNamespace(controller.Config.Namespace),
+ sensorinformers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.Everything().String()
- labelSelector := labels.NewSelector().Add(c.instanceIDReq(), c.versionReq())
options.LabelSelector = labelSelector.String()
- },
+ }),
)
informer := sensorInformerFactory.Argoproj().V1alpha1().Sensors().Informer()
informer.AddEventHandler(
@@ -67,22 +61,22 @@ func (c *SensorController) newSensorInformer() cache.SharedIndexInformer {
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
- c.queue.Add(key)
+ controller.queue.Add(key)
}
},
UpdateFunc: func(old, new interface{}) {
key, err := cache.MetaNamespaceKeyFunc(new)
if err == nil {
- c.queue.Add(key)
+ controller.queue.Add(key)
}
},
DeleteFunc: func(obj interface{}) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
- c.queue.Add(key)
+ controller.queue.Add(key)
}
},
},
)
- return informer
+ return informer, nil
}
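
For reference, a short sketch of what the instance-ID requirement built by instanceIDReq resolves to as a label selector string; the exact key behind LabelControllerInstanceID is defined elsewhere in the package, so a placeholder key is used here.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/selection"
)

func main() {
    // Placeholder for LabelControllerInstanceID; the real constant lives elsewhere
    // in the controllers/sensor package.
    const labelKey = "sensors.argoproj.io/sensor-controller-instanceid"

    req, err := labels.NewRequirement(labelKey, selection.Equals, []string{"argo-events"})
    if err != nil {
        panic(err)
    }

    // The informer factory's tweak function sets options.LabelSelector to this
    // string, so only sensors carrying the matching instance-id label are watched.
    selector := labels.NewSelector().Add(*req)
    fmt.Println(selector.String())
    // Output: sensors.argoproj.io/sensor-controller-instanceid=argo-events
}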
diff --git a/controllers/sensor/informer_test.go b/controllers/sensor/informer_test.go
index 75a5a8042e..738d603fe8 100644
--- a/controllers/sensor/informer_test.go
+++ b/controllers/sensor/informer_test.go
@@ -17,29 +17,17 @@ limitations under the License.
package sensor
import (
- "github.com/argoproj/argo-events/common"
"testing"
- "github.com/smartystreets/goconvey/convey"
+ "github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/selection"
)
-func TestInformer(t *testing.T) {
- convey.Convey("Given a sensor controller", t, func() {
- controller := getSensorController()
- convey.Convey("Instance ID required key must match", func() {
- req := controller.instanceIDReq()
- convey.So(req.Key(), convey.ShouldEqual, common.LabelKeySensorControllerInstanceID)
- convey.So(req.Operator(), convey.ShouldEqual, selection.Equals)
- convey.So(req.Values().Has("argo-events"), convey.ShouldBeTrue)
- })
- })
-
- convey.Convey("Given a sensor controller", t, func() {
- controller := getSensorController()
- convey.Convey("Get a new informer and make sure its not nil", func() {
- i := controller.newSensorInformer()
- convey.So(i, convey.ShouldNotBeNil)
- })
- })
+func TestInformer_InstanceIDReq(t *testing.T) {
+ controller := getController()
+ req, err := controller.instanceIDReq()
+ assert.Nil(t, err)
+ assert.Equal(t, req.Key(), LabelControllerInstanceID)
+ assert.Equal(t, req.Operator(), selection.Equals)
+ assert.Equal(t, req.Values().Has("argo-events"), true)
}
diff --git a/controllers/sensor/state.go b/controllers/sensor/node.go
similarity index 65%
rename from controllers/sensor/state.go
rename to controllers/sensor/node.go
index d66584742a..a7b14c1d97 100644
--- a/controllers/sensor/state.go
+++ b/controllers/sensor/node.go
@@ -17,16 +17,13 @@ limitations under the License.
package sensor
import (
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
- "github.com/sirupsen/logrus"
"time"
"github.com/argoproj/argo-events/common"
apicommon "github.com/argoproj/argo-events/pkg/apis/common"
- sclient "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned"
- "k8s.io/apimachinery/pkg/api/errors"
+ "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
+ "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/util/wait"
)
// GetNodeByName returns a copy of the node from this sensor for the nodename
@@ -48,7 +45,7 @@ func InitializeNode(sensor *v1alpha1.Sensor, nodeName string, nodeType v1alpha1.
nodeID := sensor.NodeID(nodeName)
oldNode, ok := sensor.Status.Nodes[nodeID]
if ok {
- log.WithField(common.LabelNodeName, nodeName).Info("node already initialized")
+ log.WithField(common.LabelNodeName, nodeName).Infoln("node already initialized")
return &oldNode
}
node := v1alpha1.NodeStatus{
@@ -69,50 +66,10 @@ func InitializeNode(sensor *v1alpha1.Sensor, nodeName string, nodeType v1alpha1.
common.LabelNodeName: node.DisplayName,
"node-message": node.Message,
},
- ).Info("node is initialized")
+ ).Infoln("node is initialized")
return &node
}
-// PersistUpdates persists the updates to the Sensor resource
-func PersistUpdates(client sclient.Interface, sensor *v1alpha1.Sensor, controllerInstanceId string, log *logrus.Logger) (*v1alpha1.Sensor, error) {
- sensorClient := client.ArgoprojV1alpha1().Sensors(sensor.ObjectMeta.Namespace)
- // in case persist update fails
- oldsensor := sensor.DeepCopy()
-
- sensor, err := sensorClient.Update(sensor)
- if err != nil {
- if errors.IsConflict(err) {
- log.WithError(err).Error("error updating sensor")
- return oldsensor, err
- }
-
- log.Info("re-applying updates on latest version and retrying update")
- err = ReapplyUpdate(client, sensor)
- if err != nil {
- log.WithError(err).Error("failed to re-apply update")
- return oldsensor, err
- }
- }
- log.WithField(common.LabelPhase, string(sensor.Status.Phase)).Info("sensor state updated successfully")
- return sensor, nil
-}
-
-// Reapply the update to sensor
-func ReapplyUpdate(sensorClient sclient.Interface, sensor *v1alpha1.Sensor) error {
- return wait.ExponentialBackoff(common.DefaultRetry, func() (bool, error) {
- client := sensorClient.ArgoprojV1alpha1().Sensors(sensor.Namespace)
- s, err := client.Update(sensor)
- if err != nil {
- if !common.IsRetryableKubeAPIError(err) {
- return false, err
- }
- return false, nil
- }
- sensor = s
- return true, nil
- })
-}
-
// MarkNodePhase marks the node with a phase, returns the node
func MarkNodePhase(sensor *v1alpha1.Sensor, nodeName string, nodeType v1alpha1.NodeType, phase v1alpha1.NodePhase, event *apicommon.Event, log *logrus.Logger, message ...string) *v1alpha1.NodeStatus {
node := GetNodeByName(sensor, nodeName)
@@ -123,7 +80,7 @@ func MarkNodePhase(sensor *v1alpha1.Sensor, nodeName string, nodeType v1alpha1.N
common.LabelNodeName: node.Name,
common.LabelPhase: string(node.Phase),
},
- ).Info("marking node phase")
+ ).Infoln("marking node phase")
node.Phase = phase
}
diff --git a/controllers/sensor/node_test.go b/controllers/sensor/node_test.go
new file mode 100644
index 0000000000..e66e16f9dd
--- /dev/null
+++ b/controllers/sensor/node_test.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sensor
+
+import (
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
+ fakesensor "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/fake"
+ "github.com/stretchr/testify/assert"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestSensorState(t *testing.T) {
+ fakeSensorClient := fakesensor.NewSimpleClientset()
+ logger := common.NewArgoEventsLogger()
+ fakeSensor := &v1alpha1.Sensor{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-sensor",
+ Namespace: "test",
+ },
+ }
+
+ fakeSensor, err := fakeSensorClient.ArgoprojV1alpha1().Sensors(fakeSensor.Namespace).Create(fakeSensor)
+ assert.Nil(t, err)
+
+ tests := []struct {
+ name string
+ testFunc func(t *testing.T)
+ }{
+ {
+ name: "initialize a new node",
+ testFunc: func(t *testing.T) {
+ status := InitializeNode(fakeSensor, "first_node", v1alpha1.NodeTypeEventDependency, logger)
+ assert.Equal(t, status.Phase, v1alpha1.NodePhaseNew)
+ },
+ },
+ {
+ name: "persist updates to the sensor",
+ testFunc: func(t *testing.T) {
+ sensor, err := PersistUpdates(fakeSensorClient, fakeSensor, logger)
+ assert.Nil(t, err)
+ assert.Equal(t, len(sensor.Status.Nodes), 1)
+ },
+ },
+ {
+ name: "mark node state to active",
+ testFunc: func(t *testing.T) {
+ status := MarkNodePhase(fakeSensor, "first_node", v1alpha1.NodeTypeEventDependency, v1alpha1.NodePhaseActive, &apicommon.Event{
+ Payload: []byte("test payload"),
+ }, logger)
+ assert.Equal(t, status.Phase, v1alpha1.NodePhaseActive)
+ },
+ },
+ {
+ name: "reapply the update",
+ testFunc: func(t *testing.T) {
+ err := ReapplyUpdate(fakeSensorClient, fakeSensor)
+ assert.Nil(t, err)
+ },
+ },
+ {
+ name: "fetch sensor and check updates are applied",
+ testFunc: func(t *testing.T) {
+ updatedSensor, err := fakeSensorClient.ArgoprojV1alpha1().Sensors(fakeSensor.Namespace).Get(fakeSensor.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.Equal(t, len(updatedSensor.Status.Nodes), 1)
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ test.testFunc(t)
+ })
+ }
+}
diff --git a/controllers/sensor/operator.go b/controllers/sensor/operator.go
index 99c058cd8c..95227ac995 100644
--- a/controllers/sensor/operator.go
+++ b/controllers/sensor/operator.go
@@ -17,388 +17,327 @@ limitations under the License.
package sensor
import (
- "github.com/sirupsen/logrus"
"time"
- "github.com/pkg/errors"
-
"github.com/argoproj/argo-events/common"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
"github.com/argoproj/argo-events/pkg/apis/sensor"
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
- corev1 "k8s.io/api/core/v1"
+ sensorclientset "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned"
+ "github.com/sirupsen/logrus"
+ "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
)
// the context of an operation on a sensor.
-// the sensor-controller creates this context each time it picks a Sensor off its queue.
-type sOperationCtx struct {
- // s is the sensor object
- s *v1alpha1.Sensor
+// the controller creates this context each time it picks a Sensor off its queue.
+type sensorContext struct {
+ // sensor is the sensor object
+ sensor *v1alpha1.Sensor
// updated indicates whether the sensor object was updated and needs to be persisted back to k8
updated bool
- // log is the logrus logging context to correlate logs with a sensor
- log *logrus.Logger
- // reference to the sensor-controller
- controller *SensorController
- // srctx is the context to handle child resource
- srctx sResourceCtx
+ // logger for the sensor operation context
+ logger *logrus.Logger
+ // reference to the controller
+ controller *Controller
}
-// newSensorOperationCtx creates and initializes a new sOperationCtx object
-func newSensorOperationCtx(s *v1alpha1.Sensor, controller *SensorController) *sOperationCtx {
- return &sOperationCtx{
- s: s.DeepCopy(),
+// newSensorContext creates and initializes a new sensorContext object
+func newSensorContext(sensorObj *v1alpha1.Sensor, controller *Controller) *sensorContext {
+ return &sensorContext{
+ sensor: sensorObj.DeepCopy(),
updated: false,
- log: common.NewArgoEventsLogger().WithFields(
+ logger: common.NewArgoEventsLogger().WithFields(
map[string]interface{}{
- common.LabelSensorName: s.Name,
- common.LabelNamespace: s.Namespace,
+ common.LabelSensorName: sensorObj.Name,
+ common.LabelNamespace: sensorObj.Namespace,
}).Logger,
controller: controller,
- srctx: NewSensorResourceContext(s, controller),
}
}
-// operate on sensor resource
-func (soc *sOperationCtx) operate() error {
- defer func() {
- if soc.updated {
- // persist updates to sensor resource
- labels := map[string]string{
- common.LabelSensorName: soc.s.Name,
- common.LabelSensorKeyPhase: string(soc.s.Status.Phase),
- common.LabelKeySensorControllerInstanceID: soc.controller.Config.InstanceID,
- common.LabelOperation: "persist_state_update",
- }
- eventType := common.StateChangeEventType
+// operate manages the lifecycle of a sensor object
+func (ctx *sensorContext) operate() error {
+ defer ctx.updateSensorState()
- updatedSensor, err := PersistUpdates(soc.controller.sensorClientset, soc.s, soc.controller.Config.InstanceID, soc.log)
- if err != nil {
- soc.log.WithError(err).Error("failed to persist sensor update, escalating...")
+ ctx.logger.Infoln("processing the sensor")
- // escalate failure
- eventType = common.EscalationEventType
- }
-
- // update sensor ref. in case of failure to persist updates, this is a deep copy of old sensor resource
- soc.s = updatedSensor
-
- labels[common.LabelEventType] = string(eventType)
- if err := common.GenerateK8sEvent(soc.controller.kubeClientset,
- "persist update",
- eventType,
- "sensor state update",
- soc.s.Name,
- soc.s.Namespace,
- soc.controller.Config.InstanceID,
- sensor.Kind,
- labels); err != nil {
- soc.log.WithError(err).Error("failed to create K8s event to log sensor state persist operation")
- return
- }
- soc.log.Info("successfully persisted sensor resource update and created K8s event")
- }
- soc.updated = false
- }()
+ // A validation failure prevents any further processing of the sensor object
+ if err := ValidateSensor(ctx.sensor); err != nil {
+ ctx.logger.WithError(err).Errorln("failed to validate sensor")
+ ctx.markSensorPhase(v1alpha1.NodePhaseError, false, err.Error())
+ return err
+ }
- switch soc.s.Status.Phase {
+ switch ctx.sensor.Status.Phase {
case v1alpha1.NodePhaseNew:
- err := soc.createSensorResources()
- if err != nil {
- return err
+ // If the sensor phase is new
+ // 1. Initialize all nodes - dependencies, dependency groups and triggers
+ // 2. Mark dependencies and dependency groups as active
+ // 3. Create a deployment and service (if needed) for the sensor
+ ctx.initializeAllNodes()
+ ctx.markDependencyNodesActive()
+
+ if err := ctx.createSensorResources(); err != nil {
+ ctx.logger.WithError(err).Errorln("failed to create resources for the sensor")
+ ctx.markSensorPhase(v1alpha1.NodePhaseError, false, err.Error())
+ return nil
}
+ ctx.markSensorPhase(v1alpha1.NodePhaseActive, false, "sensor is active")
+ ctx.logger.Infoln("successfully created resources for the sensor. sensor is in active state")
case v1alpha1.NodePhaseActive:
- soc.log.Info("sensor is running")
-
- err := soc.updateSensorResources()
- if err != nil {
+ ctx.logger.Infoln("checking for updates to the sensor object")
+ if err := ctx.updateSensorResources(); err != nil {
+ ctx.logger.WithError(err).Errorln("failed to update the sensor resources")
return err
}
+ ctx.updated = true
+ ctx.logger.Infoln("successfully processed sensor state update")
case v1alpha1.NodePhaseError:
- soc.log.Info("sensor is in error state. check sensor resource status information and corresponding escalated K8 event for the error")
-
- err := soc.updateSensorResources()
- if err != nil {
+ // If the sensor is in error state and if the sensor podTemplate spec has changed, then update the corresponding deployment
+ ctx.logger.Info("sensor is in error state, checking for updates to the sensor object")
+ if err := ctx.updateSensorResources(); err != nil {
+ ctx.logger.WithError(err).Errorln("failed to update the sensor resources")
return err
}
+ ctx.markSensorPhase(v1alpha1.NodePhaseActive, false, "sensor is active")
+ ctx.logger.Infoln("successfully processed the update")
}
+
return nil
}
-func (soc *sOperationCtx) createSensorResources() error {
- err := ValidateSensor(soc.s)
+// createSensorResources creates the K8s resources for a sensor object
+func (ctx *sensorContext) createSensorResources() error {
+ if ctx.sensor.Status.Resources == nil {
+ ctx.sensor.Status.Resources = &v1alpha1.SensorResources{}
+ }
+
+ ctx.logger.Infoln("generating deployment specification for the sensor")
+ deployment, err := ctx.deploymentBuilder()
if err != nil {
- soc.log.WithError(err).Error("failed to validate sensor")
- err = errors.Wrap(err, "failed to validate sensor")
- soc.markSensorPhase(v1alpha1.NodePhaseError, false, err.Error())
return err
}
-
- soc.initializeAllNodes()
- pod, err := soc.createSensorPod()
+ ctx.logger.WithField("name", deployment.Name).Infoln("creating the deployment resource for the sensor")
+ deployment, err = ctx.createDeployment(deployment)
if err != nil {
- err = errors.Wrap(err, "failed to create sensor pod")
- soc.markSensorPhase(v1alpha1.NodePhaseError, false, err.Error())
return err
}
- soc.markAllNodePhases()
- soc.log.WithField(common.LabelPodName, pod.Name).Info("sensor pod is created")
+ ctx.sensor.Status.Resources.Deployment = &deployment.ObjectMeta
- // expose sensor if service is configured
- if soc.srctx.getServiceTemplateSpec() != nil {
- svc, err := soc.createSensorService()
+ if ctx.sensor.Spec.EventProtocol.Type == apicommon.HTTP {
+ ctx.logger.Infoln("generating service specification for the sensor")
+ service, err := ctx.serviceBuilder()
if err != nil {
- err = errors.Wrap(err, "failed to create sensor service")
- soc.markSensorPhase(v1alpha1.NodePhaseError, false, err.Error())
return err
}
- soc.log.WithField(common.LabelServiceName, svc.Name).Info("sensor service is created")
- }
-
- // if we get here - we know the signals are running
- soc.log.Info("marking sensor as active")
- soc.markSensorPhase(v1alpha1.NodePhaseActive, false, "listening for events")
- return nil
-}
-func (soc *sOperationCtx) createSensorPod() (*corev1.Pod, error) {
- pod, err := soc.srctx.newSensorPod()
- if err != nil {
- soc.log.WithError(err).Error("failed to initialize pod for sensor")
- return nil, err
- }
- pod, err = soc.srctx.createSensorPod(pod)
- if err != nil {
- soc.log.WithError(err).Error("failed to create pod for sensor")
- return nil, err
- }
- return pod, nil
-}
-
-func (soc *sOperationCtx) createSensorService() (*corev1.Service, error) {
- svc, err := soc.srctx.newSensorService()
- if err != nil {
- soc.log.WithError(err).Error("failed to initialize service for sensor")
- return nil, err
- }
- svc, err = soc.srctx.createSensorService(svc)
- if err != nil {
- soc.log.WithError(err).Error("failed to create service for sensor")
- return nil, err
- }
- return svc, nil
-}
-
-func (soc *sOperationCtx) updateSensorResources() error {
- err := ValidateSensor(soc.s)
- if err != nil {
- soc.log.WithError(err).Error("failed to validate sensor")
- err = errors.Wrap(err, "failed to validate sensor")
- if soc.s.Status.Phase != v1alpha1.NodePhaseError {
- soc.markSensorPhase(v1alpha1.NodePhaseError, false, err.Error())
+ ctx.logger.WithField("name", service.Name).Infoln("generating deployment specification for the sensor")
+ service, err = ctx.createService(service)
+ if err != nil {
+ return err
}
+ ctx.sensor.Status.Resources.Service = &service.ObjectMeta
return err
}
+ return nil
+}
- _, podChanged, err := soc.updateSensorPod()
+// updateSensorResources updates the sensor resources
+func (ctx *sensorContext) updateSensorResources() error {
+ deployment, err := ctx.updateDeployment()
if err != nil {
- err = errors.Wrap(err, "failed to update sensor pod")
- soc.markSensorPhase(v1alpha1.NodePhaseError, false, err.Error())
return err
}
-
- _, svcChanged, err := soc.updateSensorService()
+ ctx.sensor.Status.Resources.Deployment = &deployment.ObjectMeta
+ service, err := ctx.updateService()
if err != nil {
- err = errors.Wrap(err, "failed to update sensor service")
- soc.markSensorPhase(v1alpha1.NodePhaseError, false, err.Error())
return err
}
-
- if soc.s.Status.Phase != v1alpha1.NodePhaseActive && (podChanged || svcChanged) {
- soc.markSensorPhase(v1alpha1.NodePhaseActive, false, "sensor is active")
+ if service == nil {
+ ctx.sensor.Status.Resources.Service = nil
+ return nil
}
-
+ ctx.sensor.Status.Resources.Service = &service.ObjectMeta
return nil
}
-func (soc *sOperationCtx) updateSensorPod() (*corev1.Pod, bool, error) {
- // Check if sensor spec has changed for pod.
- existingPod, err := soc.srctx.getSensorPod()
- if err != nil {
- soc.log.WithError(err).Error("failed to get pod for sensor")
- return nil, false, err
- }
-
- // create a new pod spec
- newPod, err := soc.srctx.newSensorPod()
- if err != nil {
- soc.log.WithError(err).Error("failed to initialize pod for sensor")
- return nil, false, err
- }
-
- // check if pod spec remained unchanged
- if existingPod != nil {
- if existingPod.Annotations != nil && existingPod.Annotations[common.AnnotationSensorResourceSpecHashName] == newPod.Annotations[common.AnnotationSensorResourceSpecHashName] {
- soc.log.WithField(common.LabelPodName, existingPod.Name).Debug("sensor pod spec unchanged")
- return nil, false, nil
+// updateSensorState updates the sensor resource state
+func (ctx *sensorContext) updateSensorState() {
+ if ctx.updated {
+ // persist updates to sensor resource
+ labels := map[string]string{
+ common.LabelSensorName: ctx.sensor.Name,
+ LabelPhase: string(ctx.sensor.Status.Phase),
+ LabelControllerInstanceID: ctx.controller.Config.InstanceID,
+ common.LabelOperation: "persist_state_update",
}
+ eventType := common.StateChangeEventType
- // By now we are sure that the spec changed, so lets go ahead and delete the exisitng sensor pod.
- soc.log.WithField(common.LabelPodName, existingPod.Name).Info("sensor pod spec changed")
-
- err := soc.srctx.deleteSensorPod(existingPod)
+ updatedSensor, err := PersistUpdates(ctx.controller.sensorClient, ctx.sensor, ctx.logger)
if err != nil {
- soc.log.WithError(err).Error("failed to delete pod for sensor")
- return nil, false, err
- }
-
- soc.log.WithField(common.LabelPodName, existingPod.Name).Info("sensor pod is deleted")
- }
-
- // Create new pod for updated sensor spec.
- createdPod, err := soc.srctx.createSensorPod(newPod)
- if err != nil {
- soc.log.WithError(err).Error("failed to create pod for sensor")
- return nil, false, err
- }
- soc.log.WithField(common.LabelPodName, newPod.Name).Info("sensor pod is created")
-
- return createdPod, true, nil
-}
-
-func (soc *sOperationCtx) updateSensorService() (*corev1.Service, bool, error) {
- // Check if sensor spec has changed for service.
- existingSvc, err := soc.srctx.getSensorService()
- if err != nil {
- soc.log.WithError(err).Error("failed to get service for sensor")
- return nil, false, err
- }
-
- // create a new service spec
- newSvc, err := soc.srctx.newSensorService()
- if err != nil {
- soc.log.WithError(err).Error("failed to initialize service for sensor")
- return nil, false, err
- }
-
- if existingSvc != nil {
- // updated spec doesn't have service defined, delete existing service.
- if newSvc == nil {
- if err := soc.srctx.deleteSensorService(existingSvc); err != nil {
- return nil, false, err
- }
- return nil, true, nil
- }
+ ctx.logger.WithError(err).Errorln("failed to persist sensor update")
- // check if service spec remained unchanged
- if existingSvc.Annotations[common.AnnotationSensorResourceSpecHashName] == newSvc.Annotations[common.AnnotationSensorResourceSpecHashName] {
- soc.log.WithField(common.LabelServiceName, existingSvc.Name).Debug("sensor service spec unchanged")
- return nil, false, nil
+ // escalate failure
+ eventType = common.EscalationEventType
}
- // service spec changed, delete existing service and create new one
- soc.log.WithField(common.LabelServiceName, existingSvc.Name).Info("sensor service spec changed")
-
- if err := soc.srctx.deleteSensorService(existingSvc); err != nil {
- return nil, false, err
+ // update the sensor ref; in case of a failure to persist updates, this is a deep copy of the old sensor resource
+ ctx.sensor = updatedSensor
+
+ labels[common.LabelEventType] = string(eventType)
+ if err := common.GenerateK8sEvent(ctx.controller.k8sClient,
+ "persist update",
+ eventType,
+ "sensor state update",
+ ctx.sensor.Name,
+ ctx.sensor.Namespace,
+ ctx.controller.Config.InstanceID,
+ sensor.Kind,
+ labels); err != nil {
+ ctx.logger.WithError(err).Error("failed to create K8s event to logger sensor state persist operation")
+ return
}
- } else if newSvc == nil {
- // sensor service doesn't exist originally
- return nil, false, nil
- }
-
- // change createSensorService to take a service spec
- createdSvc, err := soc.srctx.createSensorService(newSvc)
- if err != nil {
- soc.log.WithField(common.LabelServiceName, newSvc.Name).WithError(err).Error("failed to create service for sensor")
- return nil, false, err
+ ctx.logger.Info("successfully persisted sensor resource update and created K8s event")
}
- soc.log.WithField(common.LabelServiceName, newSvc.Name).Info("sensor service is created")
-
- return createdSvc, true, nil
+ ctx.updated = false
}
// mark the overall sensor phase
-func (soc *sOperationCtx) markSensorPhase(phase v1alpha1.NodePhase, markComplete bool, message ...string) {
- justCompleted := soc.s.Status.Phase != phase
+func (ctx *sensorContext) markSensorPhase(phase v1alpha1.NodePhase, markComplete bool, message ...string) {
+ justCompleted := ctx.sensor.Status.Phase != phase
if justCompleted {
- soc.log.WithFields(
+ ctx.logger.WithFields(
map[string]interface{}{
- "old": string(soc.s.Status.Phase),
+ "old": string(ctx.sensor.Status.Phase),
"new": string(phase),
},
- ).Info("phase updated")
+ ).Infoln("phase updated")
- soc.s.Status.Phase = phase
- if soc.s.ObjectMeta.Labels == nil {
- soc.s.ObjectMeta.Labels = make(map[string]string)
+ ctx.sensor.Status.Phase = phase
+
+ if ctx.sensor.ObjectMeta.Labels == nil {
+ ctx.sensor.ObjectMeta.Labels = make(map[string]string)
}
- if soc.s.ObjectMeta.Annotations == nil {
- soc.s.ObjectMeta.Annotations = make(map[string]string)
+
+ if ctx.sensor.ObjectMeta.Annotations == nil {
+ ctx.sensor.ObjectMeta.Annotations = make(map[string]string)
}
- soc.s.ObjectMeta.Labels[common.LabelSensorKeyPhase] = string(phase)
- // add annotations so a resource sensor can watch this sensor.
- soc.s.ObjectMeta.Annotations[common.LabelSensorKeyPhase] = string(phase)
+
+ ctx.sensor.ObjectMeta.Labels[LabelPhase] = string(phase)
+ ctx.sensor.ObjectMeta.Annotations[LabelPhase] = string(phase)
}
- if soc.s.Status.StartedAt.IsZero() {
- soc.s.Status.StartedAt = metav1.Time{Time: time.Now().UTC()}
+
+ if ctx.sensor.Status.StartedAt.IsZero() {
+ ctx.sensor.Status.StartedAt = metav1.Time{Time: time.Now().UTC()}
}
- if len(message) > 0 && soc.s.Status.Message != message[0] {
- soc.log.WithFields(
+
+ if len(message) > 0 && ctx.sensor.Status.Message != message[0] {
+ ctx.logger.WithFields(
map[string]interface{}{
- "old": soc.s.Status.Message,
+ "old": ctx.sensor.Status.Message,
"new": message[0],
},
- ).Info("sensor message updated")
- soc.s.Status.Message = message[0]
+ ).Infoln("sensor message updated")
+
+ ctx.sensor.Status.Message = message[0]
}
switch phase {
- case v1alpha1.NodePhaseComplete, v1alpha1.NodePhaseError:
+ case v1alpha1.NodePhaseError:
if markComplete && justCompleted {
- soc.log.Info("marking sensor complete")
- soc.s.Status.CompletedAt = metav1.Time{Time: time.Now().UTC()}
- if soc.s.ObjectMeta.Labels == nil {
- soc.s.ObjectMeta.Labels = make(map[string]string)
+ ctx.logger.Infoln("marking sensor state as complete")
+ ctx.sensor.Status.CompletedAt = metav1.Time{Time: time.Now().UTC()}
+
+ if ctx.sensor.ObjectMeta.Labels == nil {
+ ctx.sensor.ObjectMeta.Labels = make(map[string]string)
+ }
+ if ctx.sensor.ObjectMeta.Annotations == nil {
+ ctx.sensor.ObjectMeta.Annotations = make(map[string]string)
}
- soc.s.ObjectMeta.Labels[common.LabelSensorKeyComplete] = "true"
- soc.s.ObjectMeta.Annotations[common.LabelSensorKeyComplete] = string(phase)
+
+ ctx.sensor.ObjectMeta.Labels[LabelComplete] = "true"
+ ctx.sensor.ObjectMeta.Annotations[LabelComplete] = string(phase)
}
}
- soc.updated = true
+ ctx.updated = true
}
-func (soc *sOperationCtx) initializeAllNodes() {
+// initializeAllNodes initializes nodes of all types within a sensor
+func (ctx *sensorContext) initializeAllNodes() {
// Initialize all event dependency nodes
- for _, dependency := range soc.s.Spec.Dependencies {
- InitializeNode(soc.s, dependency.Name, v1alpha1.NodeTypeEventDependency, soc.log)
+ for _, dependency := range ctx.sensor.Spec.Dependencies {
+ InitializeNode(ctx.sensor, dependency.Name, v1alpha1.NodeTypeEventDependency, ctx.logger)
}
// Initialize all dependency groups
- if soc.s.Spec.DependencyGroups != nil {
- for _, group := range soc.s.Spec.DependencyGroups {
- InitializeNode(soc.s, group.Name, v1alpha1.NodeTypeDependencyGroup, soc.log)
+ if ctx.sensor.Spec.DependencyGroups != nil {
+ for _, group := range ctx.sensor.Spec.DependencyGroups {
+ InitializeNode(ctx.sensor, group.Name, v1alpha1.NodeTypeDependencyGroup, ctx.logger)
}
}
// Initialize all trigger nodes
- for _, trigger := range soc.s.Spec.Triggers {
- InitializeNode(soc.s, trigger.Template.Name, v1alpha1.NodeTypeTrigger, soc.log)
+ for _, trigger := range ctx.sensor.Spec.Triggers {
+ InitializeNode(ctx.sensor, trigger.Template.Name, v1alpha1.NodeTypeTrigger, ctx.logger)
}
}
-func (soc *sOperationCtx) markAllNodePhases() {
+// markDependencyNodesActive marks phase of all dependencies and dependency groups as active
+func (ctx *sensorContext) markDependencyNodesActive() {
// Mark all event dependency nodes as active
- for _, dependency := range soc.s.Spec.Dependencies {
- MarkNodePhase(soc.s, dependency.Name, v1alpha1.NodeTypeEventDependency, v1alpha1.NodePhaseActive, nil, soc.log, "node is active")
+ for _, dependency := range ctx.sensor.Spec.Dependencies {
+ MarkNodePhase(ctx.sensor, dependency.Name, v1alpha1.NodeTypeEventDependency, v1alpha1.NodePhaseActive, nil, ctx.logger, "node is active")
}
// Mark all dependency groups as active
- if soc.s.Spec.DependencyGroups != nil {
- for _, group := range soc.s.Spec.DependencyGroups {
- MarkNodePhase(soc.s, group.Name, v1alpha1.NodeTypeDependencyGroup, v1alpha1.NodePhaseActive, nil, soc.log, "node is active")
+ if ctx.sensor.Spec.DependencyGroups != nil {
+ for _, group := range ctx.sensor.Spec.DependencyGroups {
+ MarkNodePhase(ctx.sensor, group.Name, v1alpha1.NodeTypeDependencyGroup, v1alpha1.NodePhaseActive, nil, ctx.logger, "node is active")
+ }
+ }
+}
+
+// PersistUpdates persists the updates to the Sensor resource
+func PersistUpdates(client sensorclientset.Interface, sensorObj *v1alpha1.Sensor, log *logrus.Logger) (*v1alpha1.Sensor, error) {
+ sensorClient := client.ArgoprojV1alpha1().Sensors(sensorObj.ObjectMeta.Namespace)
+ // in case persist update fails
+ oldsensor := sensorObj.DeepCopy()
+
+ sensorObj, err := sensorClient.Update(sensorObj)
+ if err != nil {
+ if errors.IsConflict(err) {
+ log.WithError(err).Error("error updating sensor")
+ return oldsensor, err
+ }
+
+ log.Infoln(err)
+ log.Infoln("re-applying updates on latest version and retrying update")
+ err = ReapplyUpdate(client, sensorObj)
+ if err != nil {
+ log.WithError(err).Error("failed to re-apply update")
+ return oldsensor, err
}
}
+ log.WithField(common.LabelPhase, string(sensorObj.Status.Phase)).Info("sensor state updated successfully")
+ return sensorObj, nil
+}
+
+// ReapplyUpdate re-applies the update to the sensor resource
+func ReapplyUpdate(sensorClient sensorclientset.Interface, sensor *v1alpha1.Sensor) error {
+ return wait.ExponentialBackoff(common.DefaultRetry, func() (bool, error) {
+ client := sensorClient.ArgoprojV1alpha1().Sensors(sensor.Namespace)
+ s, err := client.Update(sensor)
+ if err != nil {
+ if !common.IsRetryableKubeAPIError(err) {
+ return false, err
+ }
+ return false, nil
+ }
+ sensor = s
+ return true, nil
+ })
}
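
ReapplyUpdate relies on wait.ExponentialBackoff with the package's common.DefaultRetry settings, which are not shown in this diff. A self-contained sketch of the same retry shape, with illustrative backoff values:

package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    // Illustrative values only; common.DefaultRetry defines the real ones.
    backoff := wait.Backoff{
        Duration: 10 * time.Millisecond,
        Factor:   2.0,
        Steps:    4,
    }

    attempts := 0
    err := wait.ExponentialBackoff(backoff, func() (bool, error) {
        attempts++
        // (false, nil) asks for another attempt after the next backoff interval,
        // mirroring how ReapplyUpdate retries retryable Kubernetes API errors.
        if attempts < 3 {
            return false, nil
        }
        // (true, nil) stops retrying and reports success.
        return true, nil
    })
    fmt.Println(attempts, err)
}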
diff --git a/controllers/sensor/operator_test.go b/controllers/sensor/operator_test.go
index 7809e05c75..e81d804887 100644
--- a/controllers/sensor/operator_test.go
+++ b/controllers/sensor/operator_test.go
@@ -17,356 +17,146 @@ limitations under the License.
package sensor
import (
- "testing"
-
+ "github.com/argoproj/argo-events/common"
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
+ "github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/tools/cache"
+ "testing"
)
-var sensorStr = `
-
-apiVersion: argoproj.io/v1alpha1
-kind: Sensor
-metadata:
- name: artifact-sensor
- namespace: argo-events
- labels:
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
-spec:
- template:
- containers:
- - name: "sensor"
- image: "argoproj/sensor"
- imagePullPolicy: Always
- serviceAccountName: argo-events-sa
- dependencies:
- - name: artifact-gateway:input
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- triggers:
- - template:
- name: artifact-workflow-trigger
- group: argoproj.io
- version: v1alpha1
- kind: Workflow
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-world-
- spec:
- entrypoint: whalesay
- templates:
- -
- container:
- args:
- - "hello world"
- command:
- - cowsay
- image: "docker/whalesay:latest"
- name: whalesay
-`
+func TestOperate(t *testing.T) {
+ controller := getController()
+ ctx := newSensorContext(sensorObj.DeepCopy(), controller)
+ sensor, err := controller.sensorClient.ArgoprojV1alpha1().Sensors(sensorObj.Namespace).Create(sensorObj)
+ assert.Nil(t, err)
+ ctx.sensor = sensor.DeepCopy()
+
+ tests := []struct {
+ name string
+ updateFunc func()
+ testFunc func(oldMetadata *v1alpha1.SensorResources)
+ }{
+ {
+ name: "process a new sensor object",
+ updateFunc: func() {},
+ testFunc: func(oldMetadata *v1alpha1.SensorResources) {
+ assert.NotNil(t, ctx.sensor.Status.Resources)
+ metadata := ctx.sensor.Status.Resources
+ deployment, err := controller.k8sClient.AppsV1().Deployments(metadata.Deployment.Namespace).Get(metadata.Deployment.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ service, err := controller.k8sClient.CoreV1().Services(metadata.Service.Namespace).Get(metadata.Service.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+ assert.Equal(t, v1alpha1.NodePhaseActive, ctx.sensor.Status.Phase)
+ assert.Equal(t, 2, len(ctx.sensor.Status.Nodes))
+ assert.Equal(t, "sensor is active", ctx.sensor.Status.Message)
+ },
+ },
+ {
+ name: "process a sensor object update",
+ updateFunc: func() {
+ ctx.sensor.Spec.Template.Spec.Containers[0].Name = "updated-name"
+ },
+ testFunc: func(oldMetadata *v1alpha1.SensorResources) {
+ assert.NotNil(t, ctx.sensor.Status.Resources)
+ metadata := ctx.sensor.Status.Resources
+ deployment, err := controller.k8sClient.AppsV1().Deployments(metadata.Deployment.Namespace).Get(metadata.Deployment.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ assert.NotEqual(t, oldMetadata.Deployment.Annotations[common.AnnotationResourceSpecHash], deployment.Annotations[common.AnnotationResourceSpecHash])
+ assert.Equal(t, deployment.Spec.Template.Spec.Containers[0].Name, "updated-name")
+ service, err := controller.k8sClient.CoreV1().Services(metadata.Service.Namespace).Get(metadata.Service.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+ assert.Equal(t, oldMetadata.Service.Annotations[common.AnnotationResourceSpecHash], service.Annotations[common.AnnotationResourceSpecHash])
+ assert.Equal(t, v1alpha1.NodePhaseActive, ctx.sensor.Status.Phase)
+ assert.Equal(t, "sensor is active", ctx.sensor.Status.Message)
+ },
+ },
+ {
+ name: "process a sensor in error state",
+ updateFunc: func() {
+ ctx.sensor.Status.Phase = v1alpha1.NodePhaseError
+ ctx.sensor.Status.Message = "sensor is in error state"
+ ctx.sensor.Spec.Template.Spec.Containers[0].Name = "revert-name"
+ },
+ testFunc: func(oldMetadata *v1alpha1.SensorResources) {
+ assert.Equal(t, v1alpha1.NodePhaseActive, ctx.sensor.Status.Phase)
+ assert.Equal(t, "sensor is active", ctx.sensor.Status.Message)
+ },
+ },
+ }
-var (
- sensorPodName = "artifact-sensor"
- sensorSvcName = "artifact-sensor-svc"
-)
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ metadata := ctx.sensor.Status.Resources.DeepCopy()
+ test.updateFunc()
+ err := ctx.operate()
+ assert.Nil(t, err)
+ test.testFunc(metadata)
+ })
+ }
+}
-func getSensor() (*v1alpha1.Sensor, error) {
- var sensor *v1alpha1.Sensor
- err := yaml.Unmarshal([]byte(sensorStr), &sensor)
- return sensor, err
+func TestUpdateSensorState(t *testing.T) {
+ controller := getController()
+ ctx := newSensorContext(sensorObj.DeepCopy(), controller)
+ sensor, err := controller.sensorClient.ArgoprojV1alpha1().Sensors(sensorObj.Namespace).Create(sensorObj)
+ assert.Nil(t, err)
+ ctx.sensor = sensor.DeepCopy()
+ assert.Equal(t, v1alpha1.NodePhaseNew, ctx.sensor.Status.Phase)
+ ctx.sensor.Status.Phase = v1alpha1.NodePhaseActive
+ ctx.updated = true
+ ctx.updateSensorState()
+ assert.Equal(t, v1alpha1.NodePhaseActive, ctx.sensor.Status.Phase)
}
-func waitForAllInformers(done chan struct{}, controller *SensorController) {
- cache.WaitForCacheSync(done, controller.informer.HasSynced)
- cache.WaitForCacheSync(done, controller.podInformer.Informer().HasSynced)
- cache.WaitForCacheSync(done, controller.svcInformer.Informer().HasSynced)
+func TestMarkSensorPhase(t *testing.T) {
+ controller := getController()
+ ctx := newSensorContext(sensorObj.DeepCopy(), controller)
+ sensor, err := controller.sensorClient.ArgoprojV1alpha1().Sensors(sensorObj.Namespace).Create(sensorObj)
+ assert.Nil(t, err)
+ ctx.sensor = sensor.DeepCopy()
+ ctx.markSensorPhase(v1alpha1.NodePhaseActive, false, "sensor is active")
+ assert.Equal(t, v1alpha1.NodePhaseActive, ctx.sensor.Status.Phase)
+ assert.Equal(t, "sensor is active", ctx.sensor.Status.Message)
}
-func getPodAndService(controller *SensorController, namespace string) (*corev1.Pod, *corev1.Service, error) {
- pod, err := controller.kubeClientset.CoreV1().Pods(namespace).Get(sensorPodName, metav1.GetOptions{})
- if err != nil {
- return nil, nil, err
+func TestInitializeAllNodes(t *testing.T) {
+ controller := getController()
+ ctx := newSensorContext(sensorObj.DeepCopy(), controller)
+ ctx.initializeAllNodes()
+ for _, node := range ctx.sensor.Status.Nodes {
+ assert.Equal(t, v1alpha1.NodePhaseNew, node.Phase)
+ assert.NotEmpty(t, node.Name)
+ assert.NotEmpty(t, node.ID)
}
- svc, err := controller.kubeClientset.CoreV1().Services(namespace).Get(sensorSvcName, metav1.GetOptions{})
- if err != nil {
- return nil, nil, err
- }
- return pod, svc, err
}
-func deletePodAndService(controller *SensorController, namespace string) error {
- err := controller.kubeClientset.CoreV1().Pods(namespace).Delete(sensorPodName, &metav1.DeleteOptions{})
- if err != nil {
- return err
+func TestMarkDependencyNodesActive(t *testing.T) {
+ controller := getController()
+ ctx := newSensorContext(sensorObj.DeepCopy(), controller)
+ ctx.initializeAllNodes()
+ ctx.markDependencyNodesActive()
+ for _, node := range ctx.sensor.Status.Nodes {
+ if node.Type == v1alpha1.NodeTypeEventDependency {
+ assert.Equal(t, v1alpha1.NodePhaseActive, node.Phase)
+ } else {
+ assert.Equal(t, v1alpha1.NodePhaseNew, node.Phase)
+ }
}
- err = controller.kubeClientset.CoreV1().Services(namespace).Delete(sensorSvcName, &metav1.DeleteOptions{})
- return err
}
-func TestSensorOperations(t *testing.T) {
- done := make(chan struct{})
- convey.Convey("Given a sensor, parse it", t, func() {
- sensor, err := getSensor()
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor, convey.ShouldNotBeNil)
-
- controller := getSensorController()
- soc := newSensorOperationCtx(sensor, controller)
- convey.ShouldPanic(soc.log, nil)
- convey.So(soc, convey.ShouldNotBeNil)
-
- convey.Convey("Create the sensor", func() {
- sensor, err = controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Create(sensor)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor, convey.ShouldNotBeNil)
-
- convey.Convey("Operate on a new sensor", func() {
- soc.markSensorPhase(v1alpha1.NodePhaseNew, false, "test")
-
- waitForAllInformers(done, controller)
- err := soc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, controller)
-
- convey.Convey("Sensor should be marked as active with it's nodes initialized", func() {
- sensor, err = controller.sensorClientset.ArgoprojV1alpha1().Sensors(soc.s.Namespace).Get(soc.s.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor, convey.ShouldNotBeNil)
- convey.So(sensor.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
-
- for _, node := range soc.s.Status.Nodes {
- switch node.Type {
- case v1alpha1.NodeTypeEventDependency:
- convey.So(node.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
- case v1alpha1.NodeTypeDependencyGroup:
- convey.So(node.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
- case v1alpha1.NodeTypeTrigger:
- convey.So(node.Phase, convey.ShouldEqual, v1alpha1.NodePhaseNew)
- }
- }
- })
-
- convey.Convey("Sensor pod and service should be created", func() {
- sensorPod, sensorSvc, err := getPodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensorPod, convey.ShouldNotBeNil)
- convey.So(sensorSvc, convey.ShouldNotBeNil)
-
- convey.Convey("Go to active state", func() {
- sensor, err := controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Get(sensor.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
- })
- })
- })
-
- convey.Convey("Operate on sensor in active state", func() {
- err := controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Delete(sensor.Name, &metav1.DeleteOptions{})
- convey.So(err, convey.ShouldBeNil)
- sensor, err = controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Create(sensor)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor, convey.ShouldNotBeNil)
-
- soc.markSensorPhase(v1alpha1.NodePhaseNew, false, "test")
-
- // Operate it once to create pod and service
- waitForAllInformers(done, controller)
- err = soc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, controller)
-
- sensorPod, sensorSvc, err := getPodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensorPod, convey.ShouldNotBeNil)
- convey.So(sensorSvc, convey.ShouldNotBeNil)
-
- convey.Convey("Operation must succeed", func() {
- soc.markSensorPhase(v1alpha1.NodePhaseActive, false, "test")
-
- waitForAllInformers(done, controller)
- err := soc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, controller)
-
- convey.Convey("Untouch pod and service", func() {
- sensorPod, sensorSvc, err := getPodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensorPod, convey.ShouldNotBeNil)
- convey.So(sensorSvc, convey.ShouldNotBeNil)
-
- convey.Convey("Stay in active state", func() {
- sensor, err := controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Get(sensor.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
- })
- })
- })
-
- convey.Convey("With deleted pod and service", func() {
- err := deletePodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
-
- convey.Convey("Operation must succeed", func() {
- soc.markSensorPhase(v1alpha1.NodePhaseActive, false, "test")
-
- waitForAllInformers(done, controller)
- err := soc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, controller)
-
- convey.Convey("Create pod and service", func() {
- sensorPod, sensorSvc, err := getPodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensorPod, convey.ShouldNotBeNil)
- convey.So(sensorSvc, convey.ShouldNotBeNil)
-
- convey.Convey("Stay in active state", func() {
- sensor, err := controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Get(sensor.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
- })
- })
- })
- })
-
- convey.Convey("Change pod and service spec", func() {
- soc.srctx.s.Spec.Template.Spec.RestartPolicy = "Never"
- soc.srctx.s.Spec.EventProtocol.Http.Port = "1234"
-
- sensorPod, sensorSvc, err := getPodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensorPod, convey.ShouldNotBeNil)
- convey.So(sensorSvc, convey.ShouldNotBeNil)
-
- convey.Convey("Operation must succeed", func() {
- soc.markSensorPhase(v1alpha1.NodePhaseActive, false, "test")
-
- waitForAllInformers(done, controller)
- err := soc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, controller)
-
- convey.Convey("Recreate pod and service", func() {
- sensorPod, sensorSvc, err := getPodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensorPod.Spec.RestartPolicy, convey.ShouldEqual, "Never")
- convey.So(sensorSvc.Spec.Ports[0].TargetPort.IntVal, convey.ShouldEqual, 1234)
-
- convey.Convey("Stay in active state", func() {
- sensor, err := controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Get(sensor.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
- })
- })
- })
- })
- })
-
- convey.Convey("Operate on sensor in error state", func() {
- err := controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Delete(sensor.Name, &metav1.DeleteOptions{})
- convey.So(err, convey.ShouldBeNil)
- sensor, err = controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Create(sensor)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor, convey.ShouldNotBeNil)
-
- soc.markSensorPhase(v1alpha1.NodePhaseNew, false, "test")
-
- // Operate it once to create pod and service
- waitForAllInformers(done, controller)
- err = soc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, controller)
-
- convey.Convey("Operation must succeed", func() {
- soc.markSensorPhase(v1alpha1.NodePhaseError, false, "test")
-
- waitForAllInformers(done, controller)
- err := soc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, controller)
-
- convey.Convey("Untouch pod and service", func() {
- sensorPod, sensorSvc, err := getPodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensorPod, convey.ShouldNotBeNil)
- convey.So(sensorSvc, convey.ShouldNotBeNil)
-
- convey.Convey("Stay in error state", func() {
- sensor, err := controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Get(sensor.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseError)
- })
- })
- })
-
- convey.Convey("With deleted pod and service", func() {
- err := deletePodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
-
- convey.Convey("Operation must succeed", func() {
- soc.markSensorPhase(v1alpha1.NodePhaseError, false, "test")
-
- waitForAllInformers(done, controller)
- err := soc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, controller)
-
- convey.Convey("Create pod and service", func() {
- sensorPod, sensorSvc, err := getPodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensorPod, convey.ShouldNotBeNil)
- convey.So(sensorSvc, convey.ShouldNotBeNil)
-
- convey.Convey("Go to active state", func() {
- sensor, err := controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Get(sensor.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
- })
- })
- })
- })
-
- convey.Convey("Change pod and service spec", func() {
- soc.srctx.s.Spec.Template.Spec.RestartPolicy = "Never"
- soc.srctx.s.Spec.EventProtocol.Http.Port = "1234"
-
- sensorPod, sensorSvc, err := getPodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensorPod, convey.ShouldNotBeNil)
- convey.So(sensorSvc, convey.ShouldNotBeNil)
-
- convey.Convey("Operation must succeed", func() {
- soc.markSensorPhase(v1alpha1.NodePhaseError, false, "test")
-
- waitForAllInformers(done, controller)
- err := soc.operate()
- convey.So(err, convey.ShouldBeNil)
- waitForAllInformers(done, controller)
-
- convey.Convey("Recreate pod and service", func() {
- sensorPod, sensorSvc, err := getPodAndService(controller, sensor.Namespace)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensorPod.Spec.RestartPolicy, convey.ShouldEqual, "Never")
- convey.So(sensorSvc.Spec.Ports[0].TargetPort.IntVal, convey.ShouldEqual, 1234)
-
- convey.Convey("Go to active state", func() {
- sensor, err := controller.sensorClientset.ArgoprojV1alpha1().Sensors(sensor.Namespace).Get(sensor.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor.Status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
- })
- })
- })
- })
- })
- })
- })
+func TestPersistUpdates(t *testing.T) {
+ controller := getController()
+ ctx := newSensorContext(sensorObj.DeepCopy(), controller)
+ sensor, err := controller.sensorClient.ArgoprojV1alpha1().Sensors(sensorObj.Namespace).Create(sensorObj)
+ assert.Nil(t, err)
+ ctx.sensor = sensor.DeepCopy()
+ ctx.sensor.Spec.Circuit = "fake-group"
+ sensor, err = PersistUpdates(controller.sensorClient, ctx.sensor.DeepCopy(), ctx.logger)
+ assert.Nil(t, err)
+ assert.Equal(t, "fake-group", sensor.Spec.Circuit)
+ assert.Equal(t, "fake-group", ctx.sensor.Spec.Circuit)
}
diff --git a/controllers/sensor/resource.go b/controllers/sensor/resource.go
index 83545344d0..d9d12d1e0c 100644
--- a/controllers/sensor/resource.go
+++ b/controllers/sensor/resource.go
@@ -1,182 +1,178 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package sensor
import (
"github.com/argoproj/argo-events/common"
controllerscommon "github.com/argoproj/argo-events/controllers/common"
- pc "github.com/argoproj/argo-events/pkg/apis/common"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
+ "github.com/pkg/errors"
+ appv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ apierror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/intstr"
)
-type sResourceCtx struct {
- // s is the gateway-controller object
- s *v1alpha1.Sensor
- // reference to the gateway-controller-controller
- controller *SensorController
-
- controllerscommon.ChildResourceContext
+// generateServiceSpec returns a K8s service spec for the sensor
+func (ctx *sensorContext) generateServiceSpec() *corev1.Service {
+ return &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ common.LabelSensorName: ctx.sensor.Name,
+ LabelControllerInstanceID: ctx.controller.Config.InstanceID,
+ },
+ },
+ Spec: corev1.ServiceSpec{
+ Ports: []corev1.ServicePort{
+ {
+ Port: intstr.Parse(ctx.sensor.Spec.EventProtocol.Http.Port).IntVal,
+ TargetPort: intstr.FromInt(int(intstr.Parse(ctx.sensor.Spec.EventProtocol.Http.Port).IntVal)),
+ },
+ },
+ Type: corev1.ServiceTypeClusterIP,
+ Selector: map[string]string{
+ common.LabelOwnerName: ctx.sensor.Name,
+ },
+ },
+ }
}
-// NewSensorResourceContext returns new sResourceCtx
-func NewSensorResourceContext(s *v1alpha1.Sensor, controller *SensorController) sResourceCtx {
- return sResourceCtx{
- s: s,
- controller: controller,
- ChildResourceContext: controllerscommon.ChildResourceContext{
- SchemaGroupVersionKind: v1alpha1.SchemaGroupVersionKind,
- LabelOwnerName: common.LabelSensorName,
- LabelKeyOwnerControllerInstanceID: common.LabelKeySensorControllerInstanceID,
- AnnotationOwnerResourceHashName: common.AnnotationSensorResourceSpecHashName,
- InstanceID: controller.Config.InstanceID,
- },
+// serviceBuilder builds a new service that exposes the sensor.
+func (ctx *sensorContext) serviceBuilder() (*corev1.Service, error) {
+ service := ctx.generateServiceSpec()
+ if err := controllerscommon.SetObjectMeta(ctx.sensor, service, v1alpha1.SchemaGroupVersionKind); err != nil {
+ return nil, err
}
+ return service, nil
}
-// sensorResourceLabelSelector returns label selector of the sensor of the context
-func (src *sResourceCtx) sensorResourceLabelSelector() (labels.Selector, error) {
- req, err := labels.NewRequirement(common.LabelSensorName, selection.Equals, []string{src.s.Name})
- if err != nil {
+// deploymentBuilder builds the deployment specification for the sensor
+func (ctx *sensorContext) deploymentBuilder() (*appv1.Deployment, error) {
+ replicas := int32(1)
+ podTemplateSpec := ctx.sensor.Spec.Template.DeepCopy()
+ if podTemplateSpec.Labels == nil {
+ podTemplateSpec.Labels = map[string]string{}
+ }
+ podTemplateSpec.Labels[common.LabelOwnerName] = ctx.sensor.Name
+ deployment := &appv1.Deployment{
+ ObjectMeta: podTemplateSpec.ObjectMeta,
+ Spec: appv1.DeploymentSpec{
+ Template: *podTemplateSpec,
+ Replicas: &replicas,
+ Selector: &metav1.LabelSelector{
+ MatchLabels: podTemplateSpec.Labels,
+ },
+ },
+ }
+ envVars := []corev1.EnvVar{
+ {
+ Name: common.SensorName,
+ Value: ctx.sensor.Name,
+ },
+ {
+ Name: common.SensorNamespace,
+ Value: ctx.sensor.Namespace,
+ },
+ {
+ Name: common.EnvVarControllerInstanceID,
+ Value: ctx.controller.Config.InstanceID,
+ },
+ }
+ for i, container := range deployment.Spec.Template.Spec.Containers {
+ container.Env = append(container.Env, envVars...)
+ deployment.Spec.Template.Spec.Containers[i] = container
+ }
+ if err := controllerscommon.SetObjectMeta(ctx.sensor, deployment, v1alpha1.SchemaGroupVersionKind); err != nil {
return nil, err
}
- return labels.NewSelector().Add(*req), nil
+ return deployment, nil
}
-// createSensorService creates a service
-func (src *sResourceCtx) createSensorService(svc *corev1.Service) (*corev1.Service, error) {
- return src.controller.kubeClientset.CoreV1().Services(src.s.Namespace).Create(svc)
+// createDeployment creates a deployment for the sensor
+func (ctx *sensorContext) createDeployment(deployment *appv1.Deployment) (*appv1.Deployment, error) {
+ return ctx.controller.k8sClient.AppsV1().Deployments(deployment.Namespace).Create(deployment)
}
-// deleteSensorService deletes a given service
-func (src *sResourceCtx) deleteSensorService(svc *corev1.Service) error {
- return src.controller.kubeClientset.CoreV1().Services(src.s.Namespace).Delete(svc.Name, &metav1.DeleteOptions{})
+// createService creates a service for the sensor
+func (ctx *sensorContext) createService(service *corev1.Service) (*corev1.Service, error) {
+ return ctx.controller.k8sClient.CoreV1().Services(service.Namespace).Create(service)
}
-// getSensorService returns the service of sensor
-func (src *sResourceCtx) getSensorService() (*corev1.Service, error) {
- selector, err := src.sensorResourceLabelSelector()
- if err != nil {
- return nil, err
- }
- svcs, err := src.controller.svcInformer.Lister().Services(src.s.Namespace).List(selector)
+// updateDeployment updates the deployment for the sensor
+func (ctx *sensorContext) updateDeployment() (*appv1.Deployment, error) {
+ newDeployment, err := ctx.deploymentBuilder()
if err != nil {
return nil, err
}
- if len(svcs) == 0 {
- return nil, nil
- }
- return svcs[0], nil
-}
-// newSensorService returns a new service that exposes sensor.
-func (src *sResourceCtx) newSensorService() (*corev1.Service, error) {
- serviceTemplateSpec := src.getServiceTemplateSpec()
- if serviceTemplateSpec == nil {
- return nil, nil
+ currentMetadata := ctx.sensor.Status.Resources.Deployment
+ if currentMetadata == nil {
+ return nil, errors.New("deployment metadata is expected to be set in gateway object")
}
- service := &corev1.Service{
- ObjectMeta: serviceTemplateSpec.ObjectMeta,
- Spec: serviceTemplateSpec.Spec,
- }
- if service.Namespace == "" {
- service.Namespace = src.s.Namespace
- }
- if service.Name == "" {
- service.Name = common.DefaultServiceName(src.s.Name)
- }
- err := src.SetObjectMeta(src.s, service)
- return service, err
-}
-// getSensorPod returns the pod of sensor
-func (src *sResourceCtx) getSensorPod() (*corev1.Pod, error) {
- selector, err := src.sensorResourceLabelSelector()
- if err != nil {
- return nil, err
- }
- pods, err := src.controller.podInformer.Lister().Pods(src.s.Namespace).List(selector)
+ currentDeployment, err := ctx.controller.k8sClient.AppsV1().Deployments(currentMetadata.Namespace).Get(currentMetadata.Name, metav1.GetOptions{})
if err != nil {
+ if apierror.IsNotFound(err) {
+ return ctx.controller.k8sClient.AppsV1().Deployments(newDeployment.Namespace).Create(newDeployment)
+ }
return nil, err
}
- if len(pods) == 0 {
- return nil, nil
- }
- return pods[0], nil
-}
-// createSensorPod creates a pod of sensor
-func (src *sResourceCtx) createSensorPod(pod *corev1.Pod) (*corev1.Pod, error) {
- return src.controller.kubeClientset.CoreV1().Pods(src.s.Namespace).Create(pod)
+ if currentDeployment.Annotations != nil && currentDeployment.Annotations[common.AnnotationResourceSpecHash] != newDeployment.Annotations[common.AnnotationResourceSpecHash] {
+ if err := ctx.controller.k8sClient.AppsV1().Deployments(currentDeployment.Namespace).Delete(currentDeployment.Name, &metav1.DeleteOptions{}); err != nil {
+ return nil, err
+ }
+ return ctx.controller.k8sClient.AppsV1().Deployments(newDeployment.Namespace).Create(newDeployment)
+ }
+ return currentDeployment, nil
}
-// deleteSensorPod deletes a given pod
-func (src *sResourceCtx) deleteSensorPod(pod *corev1.Pod) error {
- return src.controller.kubeClientset.CoreV1().Pods(src.s.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
-}
+// updateService updates the service for the sensor
+func (ctx *sensorContext) updateService() (*corev1.Service, error) {
+ isHttpTransport := ctx.sensor.Spec.EventProtocol.Type == apicommon.HTTP
+ currentMetadata := ctx.sensor.Status.Resources.Service
-// newSensorPod returns a new pod of sensor
-func (src *sResourceCtx) newSensorPod() (*corev1.Pod, error) {
- podTemplateSpec := src.s.Spec.Template.DeepCopy()
- pod := &corev1.Pod{
- ObjectMeta: podTemplateSpec.ObjectMeta,
- Spec: podTemplateSpec.Spec,
+ if currentMetadata == nil && !isHttpTransport {
+ return nil, nil
}
- if pod.Namespace == "" {
- pod.Namespace = src.s.Namespace
+ if currentMetadata != nil && !isHttpTransport {
+ if err := ctx.controller.k8sClient.CoreV1().Services(currentMetadata.Namespace).Delete(currentMetadata.Name, &metav1.DeleteOptions{}); err != nil {
+ // a warning is sufficient here; no need to halt the entire sensor operation by marking it as failed.
+ ctx.logger.WithField("service-name", currentMetadata.Name).WithError(err).Warnln("failed to delete the current service")
+ }
+ return nil, nil
}
- if pod.Name == "" {
- pod.Name = src.s.Name
+ newService, err := ctx.serviceBuilder()
+ if err != nil {
+ return nil, err
}
- src.setupContainersForSensorPod(pod)
- err := src.SetObjectMeta(src.s, pod)
- return pod, err
-}
-
-// containers required for sensor deployment
-func (src *sResourceCtx) setupContainersForSensorPod(pod *corev1.Pod) {
- // env variables
- envVars := []corev1.EnvVar{
- {
- Name: common.SensorName,
- Value: src.s.Name,
- },
- {
- Name: common.SensorNamespace,
- Value: src.s.Namespace,
- },
- {
- Name: common.EnvVarSensorControllerInstanceID,
- Value: src.controller.Config.InstanceID,
- },
+ if currentMetadata == nil && isHttpTransport {
+ return ctx.controller.k8sClient.CoreV1().Services(newService.Namespace).Create(newService)
}
- for i, container := range pod.Spec.Containers {
- container.Env = append(container.Env, envVars...)
- pod.Spec.Containers[i] = container
+ if currentMetadata == nil {
+ return nil, nil
}
-}
-
-func (src *sResourceCtx) getServiceTemplateSpec() *pc.ServiceTemplateSpec {
- var serviceSpec *pc.ServiceTemplateSpec
- // Create a ClusterIP service to expose sensor in cluster if the event protocol type is HTTP
- if src.s.Spec.EventProtocol.Type == pc.HTTP {
- serviceSpec = &pc.ServiceTemplateSpec{
- Spec: corev1.ServiceSpec{
- Ports: []corev1.ServicePort{
- {
- Port: intstr.Parse(src.s.Spec.EventProtocol.Http.Port).IntVal,
- TargetPort: intstr.FromInt(int(intstr.Parse(src.s.Spec.EventProtocol.Http.Port).IntVal)),
- },
- },
- Type: corev1.ServiceTypeClusterIP,
- Selector: map[string]string{
- common.LabelSensorName: src.s.Name,
- common.LabelKeySensorControllerInstanceID: src.controller.Config.InstanceID,
- },
- },
+ if currentMetadata.Annotations != nil && currentMetadata.Annotations[common.AnnotationResourceSpecHash] != newService.Annotations[common.AnnotationResourceSpecHash] {
+ if err := ctx.controller.k8sClient.CoreV1().Services(currentMetadata.Namespace).Delete(currentMetadata.Name, &metav1.DeleteOptions{}); err != nil {
+ return nil, err
}
+ return ctx.controller.k8sClient.CoreV1().Services(newService.Namespace).Create(newService)
}
- return serviceSpec
+ return ctx.controller.k8sClient.CoreV1().Services(currentMetadata.Namespace).Get(currentMetadata.Name, metav1.GetOptions{})
}
diff --git a/controllers/sensor/resource_test.go b/controllers/sensor/resource_test.go
new file mode 100644
index 0000000000..3cc3b1b569
--- /dev/null
+++ b/controllers/sensor/resource_test.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sensor
+
+import (
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
+ "github.com/stretchr/testify/assert"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var sensorObj = &v1alpha1.Sensor{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fake-sensor",
+ Namespace: "faker",
+ },
+ Spec: v1alpha1.SensorSpec{
+ Template: &corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fake-sensor",
+ Namespace: "faker",
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "fake-sensor",
+ ImagePullPolicy: corev1.PullAlways,
+ Image: "argoproj/sensor",
+ },
+ },
+ },
+ },
+ EventProtocol: &apicommon.EventProtocol{
+ Http: apicommon.Http{
+ Port: "12000",
+ },
+ Type: apicommon.HTTP,
+ },
+ Triggers: []v1alpha1.Trigger{
+ {
+ Template: &v1alpha1.TriggerTemplate{
+ Name: "fake-trigger",
+ GroupVersionResource: &metav1.GroupVersionResource{
+ Group: "k8s.io",
+ Version: "",
+ Resource: "pods",
+ },
+ Source: &v1alpha1.ArtifactLocation{},
+ },
+ },
+ },
+ Dependencies: []v1alpha1.EventDependency{
+ {
+ Name: "fake-gateway:fake-one",
+ },
+ },
+ },
+}
+
+func TestResource_BuildService(t *testing.T) {
+ controller := getController()
+ opctx := newSensorContext(sensorObj.DeepCopy(), controller)
+ service, err := opctx.serviceBuilder()
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+ assert.NotEmpty(t, service.Annotations[common.AnnotationResourceSpecHash])
+}
+
+func TestResource_BuildDeployment(t *testing.T) {
+ controller := getController()
+ opctx := newSensorContext(sensorObj.DeepCopy(), controller)
+ deployment, err := opctx.deploymentBuilder()
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ assert.NotEmpty(t, deployment.Annotations[common.AnnotationResourceSpecHash])
+ assert.Equal(t, int(*deployment.Spec.Replicas), 1)
+}
+
+func TestResource_SetupContainers(t *testing.T) {
+ controller := getController()
+ opctx := newSensorContext(sensorObj.DeepCopy(), controller)
+ deployment, err := opctx.deploymentBuilder()
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ assert.Equal(t, deployment.Spec.Template.Spec.Containers[0].Env[0].Name, common.SensorName)
+ assert.Equal(t, deployment.Spec.Template.Spec.Containers[0].Env[0].Value, opctx.sensor.Name)
+ assert.Equal(t, deployment.Spec.Template.Spec.Containers[0].Env[1].Name, common.SensorNamespace)
+ assert.Equal(t, deployment.Spec.Template.Spec.Containers[0].Env[1].Value, opctx.sensor.Namespace)
+ assert.Equal(t, deployment.Spec.Template.Spec.Containers[0].Env[2].Name, common.EnvVarControllerInstanceID)
+ assert.Equal(t, deployment.Spec.Template.Spec.Containers[0].Env[2].Value, controller.Config.InstanceID)
+}
+
+func TestResource_UpdateResources(t *testing.T) {
+ controller := getController()
+ ctx := newSensorContext(sensorObj.DeepCopy(), controller)
+ err := ctx.createSensorResources()
+ assert.Nil(t, err)
+
+ tests := []struct {
+ name string
+ updateFunc func()
+ testFunc func(t *testing.T, oldResources *v1alpha1.SensorResources)
+ }{
+ {
+ name: "update deployment when sensor template is updated",
+ updateFunc: func() {
+ ctx.sensor.Spec.Template.Spec.Containers[0].ImagePullPolicy = corev1.PullIfNotPresent
+ },
+ testFunc: func(t *testing.T, oldResources *v1alpha1.SensorResources) {
+ oldDeployment := oldResources.Deployment
+ deployment, err := ctx.controller.k8sClient.AppsV1().Deployments(ctx.sensor.Status.Resources.Deployment.Namespace).Get(ctx.sensor.Status.Resources.Deployment.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ assert.NotEqual(t, oldDeployment.Annotations[common.AnnotationResourceSpecHash], deployment.Annotations[common.AnnotationResourceSpecHash])
+
+ oldService := oldResources.Service
+ service, err := ctx.controller.k8sClient.CoreV1().Services(ctx.sensor.Status.Resources.Service.Namespace).Get(ctx.sensor.Status.Resources.Service.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, service)
+ assert.Equal(t, oldService.Annotations[common.AnnotationResourceSpecHash], service.Annotations[common.AnnotationResourceSpecHash])
+ },
+ },
+ {
+ name: "update event protocol to NATS and check the service deletion",
+ updateFunc: func() {
+ ctx.sensor.Spec.EventProtocol.Type = apicommon.NATS
+ },
+ testFunc: func(t *testing.T, oldResources *v1alpha1.SensorResources) {
+ oldDeployment := oldResources.Deployment
+ deployment, err := ctx.controller.k8sClient.AppsV1().Deployments(ctx.sensor.Status.Resources.Deployment.Namespace).Get(ctx.sensor.Status.Resources.Deployment.Name, metav1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+ assert.Equal(t, oldDeployment.Annotations[common.AnnotationResourceSpecHash], deployment.Annotations[common.AnnotationResourceSpecHash])
+
+ oldService := oldResources.Service
+ service, err := ctx.controller.k8sClient.CoreV1().Services(oldService.Namespace).Get(oldService.Name, metav1.GetOptions{})
+ assert.NotNil(t, err)
+ assert.Nil(t, service)
+ },
+ },
+ }
+
+ for _, test := range tests {
+ oldResources := ctx.sensor.Status.Resources.DeepCopy()
+ t.Run(test.name, func(t *testing.T) {
+ test.updateFunc()
+ err := ctx.updateSensorResources()
+ assert.Nil(t, err)
+ test.testFunc(t, oldResources)
+ })
+ }
+}
diff --git a/controllers/sensor/state_test.go b/controllers/sensor/state_test.go
deleted file mode 100644
index 3142d1ff3b..0000000000
--- a/controllers/sensor/state_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package sensor
-
-import (
- "testing"
-
- "github.com/argoproj/argo-events/common"
- apicommon "github.com/argoproj/argo-events/pkg/apis/common"
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
- fakesensor "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/fake"
- "github.com/smartystreets/goconvey/convey"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func TestSensorState(t *testing.T) {
- fakeSensorClient := fakesensor.NewSimpleClientset()
- logger := common.NewArgoEventsLogger()
- sn := &v1alpha1.Sensor{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-sensor",
- Namespace: "test",
- },
- }
-
- convey.Convey("Given a sensor", t, func() {
- convey.Convey("Create the sensor", func() {
- sn, err := fakeSensorClient.ArgoprojV1alpha1().Sensors(sn.Namespace).Create(sn)
- convey.So(err, convey.ShouldBeNil)
- convey.So(sn, convey.ShouldNotBeNil)
- })
-
- convey.Convey("Initialize a new node", func() {
- status := InitializeNode(sn, "first_node", v1alpha1.NodeTypeEventDependency, logger)
- convey.So(status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseNew)
- })
-
- convey.Convey("Persist updates to sn", func() {
- sensor, err := PersistUpdates(fakeSensorClient, sn, "1", logger)
- convey.So(err, convey.ShouldBeNil)
- convey.So(len(sensor.Status.Nodes), convey.ShouldEqual, 1)
- })
-
- convey.Convey("Mark sn node state to active", func() {
- status := MarkNodePhase(sn, "first_node", v1alpha1.NodeTypeEventDependency, v1alpha1.NodePhaseActive, &apicommon.Event{
- Payload: []byte("test payload"),
- }, logger)
- convey.So(status.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
- })
-
- convey.Convey("Reapply sn update", func() {
- err := ReapplyUpdate(fakeSensorClient, sn)
- convey.So(err, convey.ShouldBeNil)
- })
-
- convey.Convey("Fetch sn and check updates are applied", func() {
- sensor, err := fakeSensorClient.ArgoprojV1alpha1().Sensors(sn.Namespace).Get(sn.Name, metav1.GetOptions{})
- convey.So(err, convey.ShouldBeNil)
- convey.So(len(sensor.Status.Nodes), convey.ShouldEqual, 1)
- convey.Convey("Get the first_node node", func() {
- node := GetNodeByName(sensor, "first_node")
- convey.So(node, convey.ShouldNotBeNil)
- convey.So(node.Phase, convey.ShouldEqual, v1alpha1.NodePhaseActive)
- })
- })
- })
-}
diff --git a/controllers/sensor/validate.go b/controllers/sensor/validate.go
index 5c1045e27a..abce9a72cb 100644
--- a/controllers/sensor/validate.go
+++ b/controllers/sensor/validate.go
@@ -22,7 +22,6 @@ import (
"time"
"github.com/Knetic/govaluate"
-
"github.com/argoproj/argo-events/common"
pc "github.com/argoproj/argo-events/pkg/apis/common"
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
diff --git a/controllers/sensor/validate_test.go b/controllers/sensor/validate_test.go
index 55ae563275..590b50eb88 100644
--- a/controllers/sensor/validate_test.go
+++ b/controllers/sensor/validate_test.go
@@ -20,26 +20,22 @@ import (
"fmt"
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
"github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
"io/ioutil"
"testing"
-
- "github.com/smartystreets/goconvey/convey"
)
func TestValidateSensor(t *testing.T) {
dir := "../../examples/sensors"
- convey.Convey("Validate list of sensor", t, func() {
- files, err := ioutil.ReadDir(dir)
- convey.So(err, convey.ShouldBeNil)
- for _, file := range files {
- fmt.Println("filename: ", file.Name())
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", dir, file.Name()))
- convey.So(err, convey.ShouldBeNil)
- var sensor *v1alpha1.Sensor
- err = yaml.Unmarshal([]byte(content), &sensor)
- convey.So(err, convey.ShouldBeNil)
- err = ValidateSensor(sensor)
- convey.So(err, convey.ShouldBeNil)
- }
- })
+ files, err := ioutil.ReadDir(dir)
+ assert.Nil(t, err)
+ for _, file := range files {
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", dir, file.Name()))
+ assert.Nil(t, err)
+ var sensor *v1alpha1.Sensor
+ err = yaml.Unmarshal([]byte(content), &sensor)
+ assert.Nil(t, err)
+ err = ValidateSensor(sensor)
+ assert.Nil(t, err)
+ }
}
diff --git a/docs/assets/argo.png b/docs/assets/argo.png
new file mode 100644
index 0000000000..1560ce2490
Binary files /dev/null and b/docs/assets/argo.png differ
diff --git a/docs/assets/gateway.png b/docs/assets/gateway.png
new file mode 100644
index 0000000000..f33bd98921
Binary files /dev/null and b/docs/assets/gateway.png differ
diff --git a/docs/assets/gateways.png b/docs/assets/gateways.png
deleted file mode 100644
index 39d040e225..0000000000
Binary files a/docs/assets/gateways.png and /dev/null differ
diff --git a/docs/assets/sensor.png b/docs/assets/sensor.png
index 1a96f1791e..726dfe007b 100644
Binary files a/docs/assets/sensor.png and b/docs/assets/sensor.png differ
diff --git a/docs/communication.md b/docs/communication.md
deleted file mode 100644
index 3a93d4135e..0000000000
--- a/docs/communication.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# Persisting Events
-
-1. [How gateway forwards events to sensor](#how-gateway-forwards-events-to-sensor)
-2. [HTTP](#http)
-3. [NATS Standard & Streaming](#nats-standard--streaming)
-
-## How gateway forwards events to sensor?
-There are two ways an event is dispatched from gateway to sensor:
-
- 1. **HTTP**
- 2. **NATS standard or streaming service**
-
-
-
-
-
-
-
-
-
-
-## HTTP
-* To use HTTP as communication channel between gateway and sensor, you need to configure the `eventProtocol` in gateway as HTTP. Then, you need to specify
-the port on which the HTTP server in sensor will be running. The HTTP server is spun up automatically with the port configured in sensor spec when
-you create the sensor with `eventProtocol` as HTTP.
-
-* You don't need to specify address of sensor pod. The sensor pod is exposed through a ClusterIP service. This is taken care by the sensor controller.
-The name of the sensor service is formatted in a specific way by sensor controller so that gateway can create the service name from sensor name.
-This is how gateway gets the name of the service exposing sensor. Using the port defined in the spec, gateway makes HTTP POST requests to sensor service.
-
- * [**Gateway Example**](https://github.com/argoproj/argo-events/blob/master/examples/gateways/webhook-http.yaml)
- * [**Sensor Example**](https://github.com/argoproj/argo-events/blob/master/examples/sensors/webhook-http.yaml)
-
-## NATS Standard & Streaming
-* To use NATS standard or streaming as communication channel between gateway and sensor, you need to configure the `eventProtocol` in gateway as NATS and type as either `Standard` or `Streaming`.
-You can read more about NATS [here](https://nats.io/documentation/)
-
-* In case of NATS, gateway doesn't need to be aware of sensors because the gateway acts as a publisher and sensors act as subscriber.
-
-* You can store events in external persistent volume. This gives you ability to replay events in future for any reasons.
-Read more about storing NATS messages [here](https://nats.io/blog/use-cases-for-persistent-logs-with-nats-streaming/)
-
-* NATS also facilitates the components that are not part of Argo-Events to consume events generated by gateway.
-
-* For a sensor to consume the events from NATS, the `eventProtocol` needs to specified as NATS. You can then configure the Standard or Streaming connection detail in `eventProtocol`.
-
- 1. Standard NATS example
- * [**Gateway Example**](https://github.com/argoproj/argo-events/blob/master/examples/gateways/webhook-nats-standard.yaml)
- * [**Sensor Example**](https://github.com/argoproj/argo-events/blob/master/examples/sensors/webhook-nats.yaml)
-
- 2. Streaming NATS example
- * [**Gateway Example**](https://github.com/argoproj/argo-events/blob/master/examples/gateways/webhook-nats-streaming.yaml)
- * [**Sensor Example**](https://github.com/argoproj/argo-events/blob/master/examples/sensors/webhook-nats-streaming.yaml)
-
- **Note**: The framework **_does not_** provide a NATS installation. You can follow [this guide](https://github.com/nats-io/nats-streaming-operator) to install NATS onto your cluster.
diff --git a/docs/concepts/event_source.md b/docs/concepts/event_source.md
new file mode 100644
index 0000000000..dec9bb6eec
--- /dev/null
+++ b/docs/concepts/event_source.md
@@ -0,0 +1,7 @@
+# Event Source
+
+Event Sources are configuration stores for a gateway to select from. The configuration stored in an Event Source is used by a gateway to consume events from
+external entities such as AWS SNS, SQS, GCP PubSub, webhooks, etc.
+
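+A minimal sketch of what an Event Source can look like, assuming a webhook gateway listening on port 12000 (the configuration keys vary per gateway type):
+
+```yaml
+# An Event Source is a plain K8s configmap; each data entry is one
+# event source configuration that the referencing gateway can consume.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: webhook-event-source
+data:
+  example: |-
+    port: "12000"
+    endpoint: "/example"
+    method: "POST"
+```
+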
+## Specification
+Complete specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/event-source.md).
diff --git a/docs/concepts/gateway.md b/docs/concepts/gateway.md
new file mode 100644
index 0000000000..74d06bf956
--- /dev/null
+++ b/docs/concepts/gateway.md
@@ -0,0 +1,20 @@
+# Gateway
+
+## What is a gateway?
+A gateway consumes events from outside entities, transforms them into events compliant with the [cloudevents specification](https://github.com/cloudevents/spec), and dispatches them to sensors.
+
+
+
+
+
+
+
+
+
+## Relation between Gateway & Event Source
+Event Sources are the event configuration stores for a gateway. The configuration stored in an Event Source is used by a gateway to consume events from
+external entities such as AWS SNS, SQS, GCP PubSub, webhooks, etc.
+
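+For example, a gateway selects its Event Source configmap by name in its spec (a minimal, illustrative sketch):
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Gateway
+metadata:
+  name: webhook-gateway
+spec:
+  type: "webhook"
+  # name of the K8s configmap that stores the event source configurations
+  eventSource: "webhook-event-source"
+```
+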
+## Specification
+Complete specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/gateway.md).
+
diff --git a/docs/parameterization.md b/docs/concepts/parameterization.md
similarity index 100%
rename from docs/parameterization.md
rename to docs/concepts/parameterization.md
diff --git a/docs/concepts/sensor.md b/docs/concepts/sensor.md
new file mode 100644
index 0000000000..ba50aa63f0
--- /dev/null
+++ b/docs/concepts/sensor.md
@@ -0,0 +1,16 @@
+# Sensor
+Sensors define a set of event dependencies (inputs) and triggers (outputs).
+
+
+
+
+
+
+
+
+## What is an event dependency?
+A dependency is an event the sensor is expecting to happen. It is defined as "gateway-name:event-source-name".
+Also, you can use [globs](https://github.com/gobwas/glob#syntax) to catch a set of events (e.g. "gateway-name:*").
+
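+As an illustrative sketch, a sensor that listens to a webhook gateway could declare its dependencies as follows (the gateway and event source names are assumed):
+
+```yaml
+dependencies:
+  # exact match: event source "example" on gateway "webhook-gateway"
+  - name: "webhook-gateway:example"
+  # glob match: any event source on gateway "calendar-gateway"
+  - name: "calendar-gateway:*"
+```
+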
+## Specification
+Complete specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md).
diff --git a/docs/trigger.md b/docs/concepts/trigger.md
similarity index 95%
rename from docs/trigger.md
rename to docs/concepts/trigger.md
index 7e709cd691..6c52691290 100644
--- a/docs/trigger.md
+++ b/docs/concepts/trigger.md
@@ -5,8 +5,9 @@ Trigger is the resource executed by sensor once the event dependencies are resol
## How to define a trigger?
The framework provides support to fetch trigger resources from different sources.
+
### Inline
-Inlined artifacts are included directly within the sensor resource and decoded as a string. [Example](https://github.com/argoproj/argo-events/tree/master/examples/sensors/artifact.yaml)
+Inlined artifacts are included directly within the sensor resource and decoded as a string. [Example](https://github.com/argoproj/argo-events/tree/master/examples/sensors/minio.yaml)
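+
+A minimal, illustrative sketch of an inline trigger source (the embedded workflow manifest is trimmed):
+
+```yaml
+source:
+  # the resource manifest is embedded directly in the sensor spec as a string
+  inline: |
+    apiVersion: argoproj.io/v1alpha1
+    kind: Workflow
+    metadata:
+      generateName: hello-world-
+```
+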
### S3
Argo Events uses the [minio-go](https://github.com/minio/minio-go) client for access to any Amazon S3 compatible object store. [Example](https://github.com/argoproj/argo-events/tree/master/examples/sensors/context-filter-webhook.yaml)
@@ -24,7 +25,7 @@ Artifacts stored in Kubernetes configmap are accessed using the key. [Example](h
Artifacts stored in either public or private Git repository. [Example](https://github.com/argoproj/argo-events/blob/master/examples/sensors/trigger-source-git.yaml)
### Resource
-Artifacts defined as generic K8s resource template. This is specially useful if you use tools like Kustomize to generate the sensor spec. [Example](https://github.com/argoproj/argo-events/blob/master/examples/sensors/trigger-resource.yaml)
+Artifacts defined as a generic K8s resource template. This is especially useful if you use tools like Kustomize to generate the sensor spec.
## What resource types are supported out of box?
- [Argo Workflow](https://github.com/argoproj/argo)
diff --git a/docs/controllers.md b/docs/controllers.md
index 766f2d11ac..da011de24a 100644
--- a/docs/controllers.md
+++ b/docs/controllers.md
@@ -1,11 +1,11 @@
## Controllers
-* Sensor and Gateway controllers are the components which manage Sensor and Gateway resources respectively.
-* Sensor and Gateway are Kubernetes Custom Resources. For more information on K8 CRDs visit [here.](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
+* Sensor and Gateway controllers are the components that manage Sensor and Gateway objects, respectively.
+* Sensor and Gateway are Kubernetes Custom Resources. For more information on K8s CRDs, visit [here](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/).
-### Controller configmap
-Defines the `instance-id` and the `namespace` for controller configmap
+### Controller Configmap
+Defines the `instance-id` and the `namespace` for the controller.
e.g.
```yaml
# The gateway-controller configmap includes configuration information for the gateway-controller
@@ -21,16 +21,10 @@ data:
`namespace`: If you don't provide namespace, controller will watch all namespaces for gateway resource.
-`instanceID`: it is used to map a gateway or sensor to a controller.
+`instanceID`: It is used to map a gateway or sensor object to a controller.
e.g. when you create a gateway with label `gateways.argoproj.io/gateway-controller-instanceid: argo-events`, a
- controller with label `argo-events` will process that gateway. `instanceID` for controller are managed using [controller-configmap](https://raw.githubusercontent.com/argoproj/argo-events/master/hack/k8s/manifests/gateway-controller-configmap.yaml).
-Basically `instanceID` is used to horizontally scale controllers, so you won't end up overwhelming a controller with large
- number of gateways or sensors. Also keep in mind that `instanceID` has nothing to do with namespace where you are
- deploying controllers and gateways/sensors.
+ controller with label `argo-events` will process that gateway.
-
-### Gateway controller
-Gateway controller watches gateway resource and manages lifecycle of a gateway.
-
-### Sensor controller
-Sensor controller watches sensor resource and manages lifecycle of a sensor.
+`instanceID` is used to horizontally scale controllers, so you won't end up overwhelming a single controller with a large
+ number of gateways or sensors. Also keep in mind that `instanceID` has nothing to do with the namespace where you are
+ deploying controllers and gateway/sensor objects.
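+
+For example, a gateway object picks its controller through the instance-id label (a minimal sketch; the gateway name is illustrative):
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Gateway
+metadata:
+  name: webhook-gateway
+  labels:
+    # handled by the controller whose configmap sets instanceID: argo-events
+    gateways.argoproj.io/gateway-controller-instanceid: argo-events
+```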
diff --git a/docs/developer_guide.md b/docs/developer_guide.md
new file mode 100644
index 0000000000..ed6e1cde5b
--- /dev/null
+++ b/docs/developer_guide.md
@@ -0,0 +1,89 @@
+# Developer Guide
+
+## Setup your DEV environment
+Argo Events is native to Kubernetes, so you'll need a running Kubernetes cluster. This guide includes steps for `Minikube` for local development, but if you have another cluster you can ignore the Minikube-specific step 3.
+
+### Requirements
+- Golang 1.11
+- Docker
+- dep
+
+### Installation & Setup
+
+#### 1. Get the project
+```
+go get github.com/argoproj/argo-events
+cd $GOPATH/src/github.com/argoproj/argo-events
+```
+
+#### 2. Vendor dependencies
+```
+dep ensure -vendor-only
+```
+
+#### 3. Start Minikube and point Docker Client to Minikube's Docker Daemon
+```
+minikube start
+eval $(minikube docker-env)
+```
+
+#### 4. Build the project
+```
+make all
+```
+
+Follow [README](README.md#install) to install components.
+
+### Changing Types
+If you're making a change to the `pkg/apis` package, please ensure you re-run the K8s code-generator scripts found in the `/hack` folder. First, ensure you have the `generate-groups.sh` script at the path `vendor/k8s.io/code-generator/`. Then run:
+```
+$ make codegen
+```
+
+
+## How to write a custom gateway?
+To implement a custom gateway, you need to create a gRPC server and implement the service defined below.
+The framework code acts as a gRPC client, consuming the event stream from the gateway server.
+
+
+
+
+
+
+
+
+
+
+### Proto Definition
+1. The proto file is located [here](https://github.com/argoproj/argo-events/blob/master/gateways/eventing.proto)
+
+2. If you choose to implement the gateway in `Go`, then you can find generated client stubs [here](https://github.com/argoproj/argo-events/blob/master/gateways/eventing.pb.go)
+
+3. To create stubs in other languages, head over to [gRPC website](https://grpc.io/)
+
+4. Service,
+
+ /**
+ * Service for handling event sources.
+ */
+ service Eventing {
+ // StartEventSource starts an event source and returns stream of events.
+ rpc StartEventSource(EventSource) returns (stream Event);
+ // ValidateEventSource validates an event source.
+ rpc ValidateEventSource(EventSource) returns (ValidEventSource);
+ }
+
+
+### Available Environment Variables to Server
+
+ | Field | Description |
+ | ------------------------------- | ------------------------------------------------ |
+ | GATEWAY_NAMESPACE | K8s namespace of the gateway |
+ | GATEWAY_EVENT_SOURCE_CONFIG_MAP | K8s configmap containing event source |
+ | GATEWAY_NAME | name of the gateway |
+ | GATEWAY_CONTROLLER_INSTANCE_ID | gateway controller instance id |
+ | GATEWAY_CONTROLLER_NAME | gateway controller name |
+ | GATEWAY_SERVER_PORT | Port on which the gateway gRPC server should run |
+
+### Implementation
+ You can follow existing implementations [here](https://github.com/argoproj/argo-events/tree/master/gateways/core)
diff --git a/docs/gateway.md b/docs/gateway.md
deleted file mode 100644
index a711d9baa2..0000000000
--- a/docs/gateway.md
+++ /dev/null
@@ -1,179 +0,0 @@
-# Gateway
-
-## What is a gateway?
-A gateway consumes events from event sources, transforms them into the [cloudevents specification](https://github.com/cloudevents/spec) compliant events and dispatches them to sensors.
-
-
-
-
-
-
-
-
-
-## Components
-A gateway has two components:
-
- 1. gateway-client: It creates one or more gRPC clients depending on event sources configurations, consumes events from server, transforms these events into cloudevents and dispatches them to sensors.
-
- 2. gateway-server: It is a gRPC server that consumes events from event sources and streams them to gateway client.
-
-## Core gateways
-
- 1. **Calendar**:
- Events produced are based on either a [cron](https://crontab.guru/) schedule or an [interval duration](https://golang.org/pkg/time/#ParseDuration). In addition, calendar gateway supports a `recurrence` field in which to specify special exclusion dates for which this gateway will not produce an event.
-
- 2. **Webhooks**:
- Webhook gateway exposes REST API endpoints. The request received on these endpoints are treated as events. See Request Methods in RFC7231 to define the HTTP REST endpoint.
-
- 3. **Kubernetes Resources**:
- Resource gateway supports watching Kubernetes resources. Users can specify `group`, `version`, `kind`, and filters including prefix of the object name, labels, annotations, and createdBy time.
-
- 4. **Artifacts**:
- Artifact gateway supports S3 `bucket-notifications` via [Minio](https://docs.minio.io/docs/minio-bucket-notification-guide). Note that a supported notification target must be running, exposed.
-
- 5. **Streams**:
- Stream gateways contain a generic specification for messages received on a queue and/or though messaging server. The following are the stream gateways offered out of box:
-
- 1. **NATS**:
- [Nats](https://nats.io/) is an open-sourced, lightweight, secure, and scalable messaging system for cloud native applications and microservices architecture. It is currently a hosted CNCF Project.
-
- 2. **MQTT**:
- [MMQP](http://mqtt.org/) is a M2M "Internet of Things" connectivity protocol (ISO/IEC PRF 20922) designed to be extremely lightweight and ideal for mobile applications. Some broker implementations can be found [here](https://github.com/mqtt/mqtt.github.io/wiki/brokers).
-
- 3. **Kafka**:
- [Apache Kafka](https://kafka.apache.org/) is a distributed streaming platform. We use Shopify's [sarama](https://github.com/Shopify/sarama) client for consuming Kafka messages.
-
- 4. **AMQP**:
- [AMQP](https://www.amqp.org/) is a open standard messaging protocol (ISO/IEC 19464). There are a variety of broker implementations including, but not limited to the following:
- - [Apache ActiveMQ](http://activemq.apache.org/)
- - [Apache Qpid](https://qpid.apache.org/)
- - [StormMQ](http://stormmq.com/)
- - [RabbitMQ](https://www.rabbitmq.com/)
-
- You can find core gateways [here](https://github.com/argoproj/argo-events/tree/master/gateways/core)
-
-## Community gateways
-You can find gateways built by the community [here](https://github.com/argoproj/argo-events/tree/master/gateways/community). New gateway contributions are always welcome.
-
-## Example
-
- apiVersion: argoproj.io/v1alpha1
- kind: Gateway
- metadata:
- name: webhook-gateway
- labels:
- # gateway controller with instanceId "argo-events" will process this gateway
- gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
- spec:
- type: "webhook"
- eventSource: "webhook-event-source"
- processorPort: "9330"
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- template:
- metadata:
- name: "webhook-gateway-http"
- labels:
- gateway-name: "webhook-gateway"
- spec:
- containers:
- - name: "gateway-client"
- image: "argoproj/gateway-client"
- imagePullPolicy: "Always"
- command: ["/bin/gateway-client"]
- - name: "webhook-events"
- image: "argoproj/webhook-gateway"
- imagePullPolicy: "Always"
- command: ["/bin/webhook-gateway"]
- serviceAccountName: "argo-events-sa"
- service:
- metadata:
- name: webhook-gateway-svc
- spec:
- selector:
- gateway-name: "webhook-gateway"
- ports:
- - port: 12000
- targetPort: 12000
- type: LoadBalancer
- watchers:
- sensors:
- - name: "webhook-sensor"
-
-
-The gateway `spec` has following fields:
-
-1. `type`: Type of the gateway. This is defined by the user.
-
-2. `eventSource`: Refers to K8s configmap that holds the list of event sources. You can use `namespace/configmap-name` syntax to refer the configmap in a different namespace.
-
-3. `processorPort`: This is a gateway server port. You can leave this to `9330` unless you really have to change it to a different port.
-
-4. `eventProtocol`: Communication protocol between sensor and gateway. For more information, head over to [communication](./communication.md)
-
-5. `template`: Defines the specification for gateway pod.
-
-6. `service`: Specification of a K8s service to expose the gateway pod.
-
-7. `watchers`: List of sensors to which events must be dispatched.
-
-## Managing Event Sources
- * The event sources configurations are managed using K8s configmap. Once the gateway resource is created with the configmap reference in it's spec, it starts watching the configmap.
- The `gateway-client` sends each event source configuration to `gateway-server` over gRPC. The `gateway-server` then parses the configuration to start consuming events from
- external event producing entity.
-
- * You can modify K8s configmap containing event sources configurations anytime and `gateway-client` will intelligently pick new/deleted configurations and send them over to `gateway-server` to either
- start or stop the event sources.
-
-## How to write a custom gateway?
-To implement a custom gateway, you need to create a gRPC server and implement the service defined below.
-The framework code acts as a gRPC client consuming event stream from gateway server.
-
-
-
-
-
-
-
-
-
-
-### Proto Definition
-1. The proto file is located [here](https://github.com/argoproj/argo-events/blob/master/gateways/eventing.proto)
-
-2. If you choose to implement the gateway in `Go`, then you can find generated client stubs [here](https://github.com/argoproj/argo-events/blob/master/gateways/eventing.pb.go)
-
-3. To create stubs in other languages, head over to [gRPC website](https://grpc.io/)
-
-4. Service,
-
- /**
- * Service for handling event sources.
- */
- service Eventing {
- // StartEventSource starts an event source and returns stream of events.
- rpc StartEventSource(EventSource) returns (stream Event);
- // ValidateEventSource validates an event source.
- rpc ValidateEventSource(EventSource) returns (ValidEventSource);
- }
-
-
-### Available Environment Variables to Server
-
- | Field | Description |
- | ------------------------------- | ------------------------------------------------ |
- | GATEWAY_NAMESPACE | K8s namespace of the gateway |
- | GATEWAY_EVENT_SOURCE_CONFIG_MAP | K8s configmap containing event source |
- | GATEWAY_NAME | name of the gateway |
- | GATEWAY_CONTROLLER_INSTANCE_ID | gateway controller instance id |
- | GATEWAY_CONTROLLER_NAME | gateway controller name |
- | GATEWAY_SERVER_PORT | Port on which the gateway gRPC server should run |
-
-### Implementation
- You can follow existing implementations [here](https://github.com/argoproj/argo-events/tree/master/gateways/core)
diff --git a/docs/gateways/artifact.md b/docs/gateways/artifact.md
deleted file mode 100644
index 127a410652..0000000000
--- a/docs/gateways/artifact.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Minio S3
-
-The gateway listens to bucket notifications from Minio S3 server. If you are interested in AWS S3 then
-read [AWS SNS Gateway](aws-sns.md)
-
-## Install Minio
-If you dont have Minio installed already, follow this [link.](https://docs.min.io/docs/deploy-minio-on-kubernetes)
-
-## What types of bucket notifications minio offers?
-Read about [notifications](https://docs.minio.io/docs/minio-bucket-notification-guide.html)
-
-## Event Payload Structure
-Refer [AWS S3 Notitification](https://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html)
-
-## Setup
-
-1. Before you setup gateway and sensor, make sure you have necessary buckets created in Minio.
-
-2. Deploy [event source](https://github.com/argoproj/argo-events/blob/master/examples/event-sources/artifact.yaml) for the gateway. Change the
-event source configmap according to your use case.
-
-3. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/artifact.yaml). Once the gateway pod spins up, check the logs of both `gateway-client`
- and `artifact-gateway` containers and make sure no error occurs.
-
-4. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/artifact.yaml). Once the sensor pod spins up, make sure there
-are no errors in sensor pod.
-
-Drop a file onto `input` bucket and monitor workflows
-
-## How to add new event source for a different bucket?
-Simply edit the event source configmap and add new entry that contains the configuration required to listen to new bucket, save
-the configmap. The gateway will now start listening to both old and new buckets.
diff --git a/docs/gateways/aws-sns.md b/docs/gateways/aws-sns.md
deleted file mode 100644
index 70f3384555..0000000000
--- a/docs/gateways/aws-sns.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# AWS SNS
-
-The gateway listens to notifications from AWS SNS.
-
-## Why is there webhook in the gateway?
-Because one of the ways you can receive notifications from SNS is over http. So, the gateway runs a http server internally.
-Once you create an entry in the event source configmap, the gateway will register the url of the server on AWS.
-All notifications for that topic will then be dispatched by SNS over to the endpoint specified in event source.
-
-The gateway spec defined in `examples` has a `serviceSpec`. This service is used to expose the gateway server to the outside world.
-
-## How to get the URL for the service?
-Depending upon the Kubernetes provider, you can create the Ingress or Route.
-
-## Setup
-
-1. Deploy [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/aws-sns.yaml) before creating event sources because you need to have the gateway pod running and a service backed by the pod, so that you can get the URL for the service.
-
-2. Create the [event source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/aws-sns.yaml).
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/aws-sns.yaml).
-
-## Trigger Workflow
-
-As soon as a message is published on your SNS topic, a workflow will be triggered.
-
\ No newline at end of file
diff --git a/docs/gateways/aws-sqs.md b/docs/gateways/aws-sqs.md
deleted file mode 100644
index 7aa254d35a..0000000000
--- a/docs/gateways/aws-sqs.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# AWS SQS
-
-The gateway consumes messages from AWS SQS queue.
-
-## Setup
-
-1. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/aws-sqs.yaml)
-
-2. Create the [event Source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/aws-sqs.yaml). Because SQS works on polling, you need to provide a `waitTimeSeconds`.
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/aws-sqs.yaml).
-
-## Trigger Workflow
-As soon as there a message is consumed from SQS queue, a workflow will be triggered.
-
-## How to parse JSON payload
-As you know, the SQS message may be plan text or JSON. In case that you will send a JSON structure, you can define the `path` field.
-For example, the SQS message is `{"foo":"bar"}` and on the resourceParameters section will be defined like this:
-```yaml
-resourceParameters:
- - src:
- event: "aws-sqs-gateway:notification-1"
- path: "foo"
- dest: spec.arguments.parameters.0.value
-```
diff --git a/docs/gateways/calendar.md b/docs/gateways/calendar.md
deleted file mode 100644
index e98696cf0e..0000000000
--- a/docs/gateways/calendar.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Calendar
-
-The gateway helps schedule K8s resources on an interval or a cron schedule. It is a solution for triggering any standard or custom K8s
-resource, instead of using a CronJob.
-
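-For example, an event source entry can be as small as an interval or a cron expression (these mirror the example event source linked in the setup steps below):
-
-```yaml
-example-with-interval: |-
-  # creates an event every 10 seconds
-  interval: 10s
-
-example-with-schedule: |-
-  # schedules an event at 30 minutes past each hour
-  schedule: "30 * * * *"
-```
-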
-## Setup
-1. Create the [event Source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/calendar.yaml).
-
-2. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/calendar.yaml).
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/calendar.yaml).
diff --git a/docs/gateways/file.md b/docs/gateways/file.md
deleted file mode 100644
index cd4c4745e3..0000000000
--- a/docs/gateways/file.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# File
-
-The gateway watches changes to a file within a specified directory.
-
-## Where should the directory be?
-The directory can be in the pod's own filesystem, or you can mount a persistent volume and point to a directory on it.
-Make sure the directory exists before you create the gateway configmap.
-
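-If you go the persistent volume route, a rough sketch of the idea, assuming the gateway's `template.spec` accepts standard pod `volumes`/`volumeMounts` (the container name, claim name and mount path below are illustrative):
-
-```yaml
-template:
-  spec:
-    containers:
-      # hypothetical gateway server container
-      - name: file-events
-        volumeMounts:
-          - name: watched-dir
-            mountPath: /data/watched
-    volumes:
-      - name: watched-dir
-        persistentVolumeClaim:
-          claimName: file-gateway-pvc
-```
-
-The event source's `directory` would then point at `/data/watched/`.
-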
-## Setup
-1. Create the [event Source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/file.yaml).
-
-2. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/file.yaml).
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/file.yaml).
-
-## Trigger Workflow
-
-Exec into the gateway pod, go to the directory specified in the event source, and create a file. That should generate an event, causing the sensor to trigger a workflow.
-
-## How to listen to notifications from different directories
-Simply edit the event source configmap, add a new entry that contains the configuration required to listen to a file within a different directory, and save
-the configmap. The gateway will start listening to file notifications from the new directory as well.
diff --git a/docs/gateways/gcp-pubsub.md b/docs/gateways/gcp-pubsub.md
deleted file mode 100644
index d4e1561c78..0000000000
--- a/docs/gateways/gcp-pubsub.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# GCP PubSub
-
-The gateway listens to event streams from Google Cloud Pub/Sub topics.
-
-Make sure to mount the credentials file for authentication into the gateway pod and reference its path in `credentialsFile`.
-
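-A rough sketch of mounting a credentials secret into the gateway pod, assuming the gateway's `template.spec` accepts standard pod volumes (the secret name, container name and paths are illustrative):
-
-```yaml
-template:
-  spec:
-    containers:
-      # hypothetical gateway server container
-      - name: gcp-pubsub-events
-        volumeMounts:
-          - name: gcp-credentials
-            mountPath: /etc/gcp
-            readOnly: true
-    volumes:
-      - name: gcp-credentials
-        secret:
-          # hypothetical secret holding the service account JSON
-          secretName: gcp-pubsub-sa
-```
-
-`credentialsFile` would then be set to something like `/etc/gcp/credentials.json`.
-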
-## Setup
-1. Create the [event Source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/gcp-pubsub.yaml).
-
-2. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/gcp-pubsub.yaml).
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/gcp-pubsub.yaml).
-
-## Trigger Workflow
-As soon as a message is consumed from the PubSub topic, a workflow will be triggered.
diff --git a/docs/gateways/github.md b/docs/gateways/github.md
deleted file mode 100644
index acaee48b94..0000000000
--- a/docs/gateways/github.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Github
-
-The gateway listens to events from GitHub.
-
-## Events types and webhook
-Refer [here](https://developer.github.com/v3/activity/events/types/) for more information on the types of events.
-
-Refer [here](https://developer.github.com/v3/repos/hooks/#get-single-hook) to understand the structure of a webhook.
-
-The gateway spec defined in `examples` has a `serviceSpec`. This service is used to expose the gateway server and make it reachable from GitHub.
-The event payload dispatched from the gateway contains the type of the event in the headers.
-
-## How to get the URL for the service?
-Depending upon the Kubernetes provider, you can create the Ingress or Route.
-
-## Setup
-1. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/github.yaml) before creating the event source configmap, because you need to have the gateway pod running and a service backed by the pod, so that you can get the URL for the service.
-
-2. Create the [event source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/github.yaml).
-
-3. Deploy the [Sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/github.yaml).
-
-## Trigger Workflow
-Depending upon the event you subscribe to, a workflow will be triggered.
diff --git a/docs/gateways/gitlab.md b/docs/gateways/gitlab.md
deleted file mode 100644
index f5d4c7c86c..0000000000
--- a/docs/gateways/gitlab.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Gitlab
-
-The gateway listens to events from Gitlab.
-
-The gateway spec defined in `examples` has a `serviceSpec`. This service is used to expose the gateway server and make it reachable from Gitlab.
-
-## How to get the URL for the service?
-Depending upon the Kubernetes provider, you can create the Ingress or Route.
-
-## Setup
-
-1. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/gitlab.yaml) before creating the event source configmap,
-because you need to have the gateway pod running and a service backed by the pod, so that you can get the URL for the service.
-
-2. Create the [event Source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/gitlab.yaml).
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/gitlab.yaml).
-
-## Trigger Workflow
-Depending upon the event you subscribe to, a workflow will be triggered.
-
diff --git a/docs/gateways/resource.md b/docs/gateways/resource.md
deleted file mode 100644
index 689a16364d..0000000000
--- a/docs/gateways/resource.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Resource
-
-The resource gateway listens to updates on **any** Kubernetes resource.
-
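-A rough sketch of the shape of a resource event source entry (hedged: the field names here are assumptions and should be checked against the example linked below; the values are illustrative):
-
-```yaml
-example: |-
-  # namespace to watch (assumed field)
-  namespace: argo-events
-  # group/version/kind of the resource to watch (assumed fields)
-  group: argoproj.io
-  version: v1alpha1
-  kind: Workflow
-  # optional label filter (assumed field)
-  filter:
-    labels:
-      workflows.argoproj.io/phase: Succeeded
-```
-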
-## Setup
-1. Create the [event Source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/resource.yaml).
-
-2. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/resource.yaml).
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/resource.yaml).
-
diff --git a/docs/gateways/slack.md b/docs/gateways/slack.md
deleted file mode 100644
index 8ce63a25a6..0000000000
--- a/docs/gateways/slack.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Slack
-
-The gateway listens to events from Slack.
-The gateway does not register the webhook endpoint on Slack; you need to do it manually.
-
-## Setup
-
-1. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/slack.yaml).
-
-2. Create the [event Source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/slack.yaml).
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/slack.yaml).
-
-## Trigger Workflow
-A workflow will be triggered when Slack sends an event.
diff --git a/docs/gateways/storage-grid.md b/docs/gateways/storage-grid.md
deleted file mode 100644
index 4089ac9f04..0000000000
--- a/docs/gateways/storage-grid.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# StorageGrid
-
-The gateway listens to bucket notifications from StorageGrid.
-
-Note: The gateway does not register the webhook endpoint on StorageGrid; you need to do it manually, mainly because of limitations of the StorageGrid API.
-The gateway spec defined in `examples` has a `serviceSpec`. This service is used to expose the gateway server and make it reachable from StorageGrid.
-
-## How to get the URL for the service?
-Depending upon the Kubernetes provider, you can create the Ingress or Route.
-
-## Setup
-
-1. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/storage-grid.yaml).
-
-2. Create the [event Source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/storage-grid.yaml).
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/storage-grid.yaml).
-
-4. Configure notifications
-
- Go to your tenant page on StorageGRID
- Create an endpoint with the following values, and click save
-
- Display Name: S3 Notifications
- URI:
- URN: urn:mytext:sns:us-east::my_topic
- Access Key:
- Secret Key:
- Certificate Validation:
-
- Go to the bucket for which you want to configure notifications.
- Enter the following XML string, and click save
-
-
-        <NotificationConfiguration>
-            <TopicConfiguration>
-                <Id>Object-Event</Id>
-                <Topic>urn:mytext:sns:us-east::my_topic</Topic>
-                <Event>s3:ObjectCreated:*</Event>
-                <Event>s3:ObjectRemoved:*</Event>
-            </TopicConfiguration>
-        </NotificationConfiguration>
-
-## Trigger Workflow
-Drop a file into the bucket for which you configured the notifications and watch an Argo workflow being triggered.
diff --git a/docs/gateways/streams.md b/docs/gateways/streams.md
deleted file mode 100644
index ed818a525c..0000000000
--- a/docs/gateways/streams.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Streams
-
-A stream gateway listens to messages on a message queue.
-
-The event source configuration is similar across all stream gateways. We will walk through the setup of the NATS gateway.
-
-## NATS
-
-The NATS gateway consumes messages by subscribing to a NATS subject.
-
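-A hedged sketch of a NATS event source entry, assuming the entry takes the service `url` and the `subject` to subscribe to (the URL below is illustrative):
-
-```yaml
-example: |-
-  # url of the nats service
-  url: nats://nats.argo-events:4222
-  # subject to subscribe to
-  subject: foo
-```
-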
-## Setup
-
-1. Create the [event Source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/nats.yaml).
-
-2. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/nats.yaml).
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/nats.yaml).
-
-## Trigger Workflow
-Publish a message to the subject `foo`. You might find [this](https://github.com/nats-io/go-nats/tree/master/examples/nats-pub) useful.
diff --git a/docs/gateways/webhook.md b/docs/gateways/webhook.md
deleted file mode 100644
index d3b1531877..0000000000
--- a/docs/gateways/webhook.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Webhook
-
-The gateway runs one or more HTTP servers in a pod.
-
-## Endpoints
-Endpoints are activated or deactivated at runtime.
-The gateway pod continuously monitors the event source configmap. If you add a new endpoint entry to the configmap, the server will register it as
-an active endpoint, and if you remove an endpoint entry, the server will mark that endpoint as inactive.
-
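-For example, a configmap with two endpoint entries might look roughly like this (a hedged sketch; the `/foo` and `/bar` endpoints are illustrative, and the entry fields are assumed to be `port`, `endpoint` and `method`):
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: webhook-event-source
-  labels:
-    # do not remove
-    argo-events-event-source-version: v0.11
-data:
-  foo: |-
-    port: "12000"
-    endpoint: "/foo"
-    method: "POST"
-  bar: |-
-    port: "12000"
-    endpoint: "/bar"
-    method: "POST"
-```
-
-Adding the `bar` entry at runtime activates `/bar`; removing it marks that endpoint as inactive.
-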
-## Why is there a service spec in the gateway spec?
-Because you'd probably want to expose the gateway to the outside world, as the gateway pod is running HTTP servers.
-If you don't want to expose the gateway, just remove the `serviceSpec` from the gateway spec.
-
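-A hedged sketch of such a `serviceSpec` (a standard Kubernetes ServiceSpec; the selector label and port are illustrative and must match the labels on your gateway template and the port in your event source):
-
-```yaml
-serviceSpec:
-  selector:
-    gateway-name: webhook-gateway
-  ports:
-    - port: 12000
-      targetPort: 12000
-  type: LoadBalancer
-```
-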
-## Setup
-
-1. Create the [event source](https://github.com/argoproj/argo-events/tree/master/examples/event-sources/webhook.yaml).
-
-2. Deploy the [gateway](https://github.com/argoproj/argo-events/tree/master/examples/gateways/webhook.yaml).
-
-3. Deploy the [sensor](https://github.com/argoproj/argo-events/tree/master/examples/sensors/webhook.yaml).
-
-## Trigger Workflow
-
-Note: the `WEBHOOK_SERVICE_URL` will differ based on the Kubernetes cluster.
-
- export WEBHOOK_SERVICE_URL=$(minikube service -n argo-events --url )
- echo $WEBHOOK_SERVICE_URL
- curl -d '{"message":"this is my first webhook"}' -H "Content-Type: application/json" -X POST $WEBHOOK_SERVICE_URL/foo
-
-
-Note:
-
-1. If you are facing an issue getting the service URL by running `minikube service -n argo-events --url `, you can use `kubectl port-forward`
-2. Open another terminal window and enter `kubectl port-forward -n argo-events 9003:`
-3. You can now use `localhost:9003` to query the webhook gateway
diff --git a/docs/getting_started.md b/docs/getting_started.md
index e1943fa86f..34e9f791fb 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -1,28 +1,33 @@
# Getting Started
-Lets deploy a webhook gateway and sensor,
+We are going to set up a gateway, sensor, and event source for a webhook. The goal is
+to trigger an Argo workflow upon an HTTP POST request.
- * First, we need to setup event sources for gateway to listen. The event sources for any gateway are managed using K8s configmap.
+ * First, we need to set up the event sources for the gateway to listen to.
- kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/event-sources/webhook.yaml
+ kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/event-sources/webhook.yaml
+
+ The event-source drives the configuration required for a gateway to consume events from external sources.
* Create webhook gateway,
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/gateways/webhook.yaml
- After running above command, gateway controller will create corresponding gateway pod and a LoadBalancing service.
+      After running the above command, the gateway controller will create a corresponding pod and service.
* Create webhook sensor,
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/webhook.yaml
+
+      Once the sensor object is created, the sensor controller will create a corresponding pod and service.
- Once sensor resource is created, sensor controller will create corresponding sensor pod and a ClusterIP service.
-
- * Once the gateway and sensor pods are running, trigger the webhook via a http POST request to `/example` endpoint.
+ * Once the gateway and sensor pods are running, dispatch an HTTP POST request to the `/example` endpoint.
Note: the `WEBHOOK_SERVICE_URL` will differ based on the Kubernetes cluster.
export WEBHOOK_SERVICE_URL=$(minikube service -n argo-events --url webhook-gateway-svc)
+
echo $WEBHOOK_SERVICE_URL
+
curl -d '{"message":"this is my first webhook"}' -H "Content-Type: application/json" -X POST $WEBHOOK_SERVICE_URL/example
Note:
@@ -30,16 +35,16 @@ Lets deploy a webhook gateway and sensor,
minikube service -n argo-events --url webhook-gateway-svc
- You can use port forwarding to access the service
+ You can use port forwarding to access the service as well,
kubectl port-forward
Open another terminal window and enter
- kubectl port-forward -n argo-events 9003:9330
+ kubectl port-forward -n argo-events 12000:12000
- You can now use `localhost:9003` to query webhook gateway
+ You can now use `localhost:12000` to query webhook gateway
- Verify that the Argo workflow was run when the trigger was executed.
+ Verify that an Argo workflow was triggered.
- argo list -n argo-events
+ kubectl -n argo-events get workflows | grep "webhook"
diff --git a/docs/index.md b/docs/index.md
index bf65620c79..eea4f2044a 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,9 +1,5 @@
# Argo Events
-
-
-
-
## What is Argo Events?
**Argo Events** is an event-based dependency manager for Kubernetes which helps you define multiple dependencies from a variety of event sources like webhook, s3, schedules, streams etc.
and trigger Kubernetes objects after successful event dependencies resolution.
@@ -26,15 +22,21 @@ and trigger Kubernetes objects after successful event dependencies resolution.
* CloudEvents compliant.
* Ability to manage event sources at runtime.
-## Core Concepts
-The framework is made up of three components:
-
- 1. [**Gateway**](gateway.md) which is implemented as a Kubernetes-native Custom Resource Definition processes events from event source.
-
- 2. [**Sensor**](sensor.md) which is implemented as a Kubernetes-native Custom Resource Definition defines a set of event dependencies and triggers K8s resources.
-
- 3. **Event Source** is a configmap that contains configurations which is interpreted by gateway as source for events producing entity.
-
-## In Nutshell
-Gateway monitors event sources and starts routines in parallel that consume events from entities like S3, Github, SNS, SQS,
-PubSub etc. and dispatch these events to sensor. Sensor upon receiving the events, evaluates the dependencies and triggers Argo workflows or other K8s resources.
+## Event Listeners
+1. AMQP
+2. AWS SNS
+3. AWS SQS
+4. Cron Schedules
+5. GCP PubSub
+6. GitHub
+7. GitLab
+8. HDFS
+9. File Based Events
+10. Kafka
+11. Minio
+12. NATS
+13. MQTT
+14. K8s Resources
+15. Slack
+16. NetApp StorageGrid
+17. Webhooks
diff --git a/docs/installation.md b/docs/installation.md
index 9fd11c0daa..579c648c23 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -1,13 +1,12 @@
# Installation
-
### Requirements
* Kubernetes cluster >v1.9
* Installed the [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) command-line tool >v1.9.0
### Using Helm Chart
-Note: as of today (5 Dec 2019) this method does not work with Helm 3, only Helm 2.
+Note: This method does not work with Helm 3, only Helm 2.
Make sure you have helm client installed and Tiller server is running. To install helm, follow the link.
@@ -18,10 +17,16 @@ Make sure you have helm client installed and Tiller server is running. To instal
2. Install `argo-events` chart
helm install argo/argo-events
-
### Using kubectl
-* Deploy Argo Events SA, Roles, ConfigMap, Sensor Controller and Gateway Controller
+
+#### One Command Installation
+
+1. Deploy Argo Events SA, Roles, ConfigMap, Sensor Controller and Gateway Controller
+
+ kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/master/hack/k8s/manifests/installation.yaml
+
+#### Step-by-Step Installation
1. Create the namespace
@@ -58,3 +63,15 @@ Make sure you have helm client installed and Tiller server is running. To instal
9. Deploy the gateway controller
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/hack/k8s/manifests/gateway-controller-deployment.yaml
+
+## Deploy at cluster level
+To deploy the Argo Events controllers at the cluster level, where the controllers will be
+able to process gateway and sensor objects created in any namespace:
+
+1. Make sure to apply the cluster role and binding to the service account,
+
+ kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/master/hack/k8s/manifests/argo-events-cluster-roles.yaml
+
+2. Update the controller configmap for both the gateway and sensor controllers and remove the `namespace` key from it, as sketched after this list.
+
+3. Deploy both gateway and sensor controllers and watch the magic.
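+
+For step 2, a hedged sketch of the gateway-controller configmap with the `namespace` key removed (the sensor-controller configmap is analogous; the `instanceID` value must match your installation):
+
+        apiVersion: v1
+        kind: ConfigMap
+        metadata:
+          name: gateway-controller-configmap
+        data:
+          config: |
+            instanceID: argo-events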
diff --git a/docs/sensor.md b/docs/sensor.md
deleted file mode 100644
index a06a20a51e..0000000000
--- a/docs/sensor.md
+++ /dev/null
@@ -1,453 +0,0 @@
-# Sensor
-Sensors define a set of event dependencies (inputs) and triggers (outputs).
-
-
-
-
-
-
-
-
-## What is an event dependency?
-A dependency is an event the sensor is expecting to happen. It is defined as "gateway-name:event-source-name".
-Also, you can use [globs](https://github.com/gobwas/glob#syntax) to catch a set of events (e.g. "gateway-name:*").
-
-## What is a dependency group?
-A dependency group is basically a group of event dependencies.
-
-## What is a circuit?
-A circuit is arbitrary boolean logic that can be applied to dependency groups.
-
-## What is a trigger?
-Refer to [Triggers](trigger.md).
-
-## How does it work?
- 1. Once a Sensor receives an event from a Gateway, either over HTTP or through NATS, it validates
- the event against dependencies defined in the Sensor spec. If the Sensor expects the event then the
- event is marked as valid and the dependency is marked as resolved.
-
- 2. If you haven't defined dependency groups, a Sensor waits for all dependencies to resolve and then
- kicks off each of its triggers in sequence. If filters are defined, the Sensor applies the filters to the
- incoming event. If the event passes the filters the Sensor's triggers are fired.
-
- 3. If you have defined dependency groups, a Sensor evaluates the group that the incoming event belongs to
- and marks the group as resolved if all other event dependencies in the group have already been resolved.
-
- 4. Whenever a dependency group is resolved, the Sensor evaluates the `circuit` defined in spec. If
- the `circuit` resolves to true, the triggers are fired. Sensors always wait for a `circuit` to resolve
- to true before firing triggers.
-
- 5. You may not want to fire all of the triggers defined in your Sensor spec. The `when` switch can be
- used to control when a certain trigger should be fired depending on which dependency group has been
- resolved.
-
- 6. After a Sensor fires its triggers, it transitions into the `complete` state, increments its completion
-    counter, and initializes its state back to running, starting the process all over again. Any event that
-    is received while the Sensor is waiting to restart is stored in an internal queue.
-
-  **Note**: If you don't provide dependency groups and a `circuit`, the sensor performs an `AND` operation on the event dependencies.
-
-## Basic Example
-
-Let's look at a basic example,
-
- apiVersion: argoproj.io/v1alpha1
- kind: Sensor
- metadata:
- name: webhook-sensor
- labels:
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
- spec:
- template:
- spec:
- containers:
- - name: "sensor"
- image: "argoproj/sensor"
- imagePullPolicy: Always
- serviceAccountName: argo-events-sa
- dependencies:
- - name: "webhook-gateway:example"
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- triggers:
- - template:
- name: webhook-workflow-trigger
- group: argoproj.io
- version: v1alpha1
- kind: Workflow
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: webhook-
- spec:
- entrypoint: whalesay
- arguments:
- parameters:
- - name: message
- # this is the value that should be overridden
- value: hello world
- templates:
- - name: whalesay
- inputs:
- parameters:
- - name: message
- container:
- image: docker/whalesay:latest
- command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
- resourceParameters:
- - src:
- event: "webhook-gateway:example"
- dest: spec.arguments.parameters.0.value
-
-i. The `spec.template.spec` defines the template for the sensor pod.
-
-ii. The `dependencies` define the list of events the sensor expects to receive; without dependency groups, this is an AND operation.
-
-iii. `eventProtocol` expresses the mode of communication used to receive events
-from gateways.
-
-iv. `triggers` define a list of templates, each containing the specification for a K8s resource and optional parameters.
-
-## Circuit
-
-Now, let's look at a more complex example involving a circuit,
-
- apiVersion: argoproj.io/v1alpha1
- kind: Sensor
- metadata:
- name: webhook-sensor-http
- labels:
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
- spec:
- template:
- spec:
- containers:
- - name: "sensor"
- image: "argoproj/sensor"
- imagePullPolicy: Always
- serviceAccountName: argo-events-sa
- dependencies:
- - name: "webhook-gateway-http:endpoint1"
- filters:
- name: "context-filter"
- context:
- source:
- host: xyz.com
- contentType: application/json
- - name: "webhook-gateway-http:endpoint2"
- - name: "webhook-gateway-http:endpoint3"
- - name: "webhook-gateway-http:endpoint4"
- filters:
- name: "data-filter"
- data:
- - path: bucket
- type: string
- value:
- - "argo-workflow-input"
- - "argo-workflow-input1"
- - name: "webhook-gateway-http:endpoint5"
- - name: "webhook-gateway-http:endpoint6"
- - name: "webhook-gateway-http:endpoint7"
- - name: "webhook-gateway-http:endpoint8"
- - name: "webhook-gateway-http:endpoint9"
- dependencyGroups:
- - name: "group_1"
- dependencies:
- - "webhook-gateway-http:endpoint1"
- - "webhook-gateway-http:endpoint2"
- - name: "group_2"
- dependencies:
- - "webhook-gateway-http:endpoint3"
- - name: "group_3"
- dependencies:
- - "webhook-gateway-http:endpoint4"
- - "webhook-gateway-http:endpoint5"
- - name: "group_4"
- dependencies:
- - "webhook-gateway-http:endpoint6"
- - "webhook-gateway-http:endpoint7"
- - "webhook-gateway-http:endpoint8"
- - name: "group_5"
- dependencies:
- - "webhook-gateway-http:endpoint9"
- circuit: "group_1 || group_2 || ((group_3 || group_4) && group_5)"
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- triggers:
- - template:
- when:
- any:
- - "group_1"
- - "group_2"
- name: webhook-workflow-trigger
- group: argoproj.io
- version: v1alpha1
- kind: Workflow
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-1-
- spec:
- entrypoint: whalesay
- arguments:
- parameters:
- - name: message
- # this is the value that should be overridden
- value: hello world
- templates:
- - name: whalesay
- inputs:
- parameters:
- - name: message
- container:
- image: docker/whalesay:latest
- command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
- resourceParameters:
- - src:
- event: "webhook-gateway-http:endpoint1"
- dest: spec.arguments.parameters.0.value
- - template:
- name: webhook-workflow-trigger-2
- when:
- all:
- - "group_5"
- - "group_4"
- group: argoproj.io
- version: v1alpha1
- kind: Workflow
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-world-2-
- spec:
- entrypoint: whalesay
- templates:
- - name: whalesay
- container:
- args:
- - "hello world"
- command:
- - cowsay
- image: "docker/whalesay:latest"
- - template:
- name: webhook-workflow-trigger-common
- group: argoproj.io
- version: v1alpha1
- kind: Workflow
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-world-common-
- spec:
- entrypoint: whalesay
- templates:
- - name: whalesay
- container:
- args:
- - "hello world"
- command:
- - cowsay
- image: "docker/whalesay:latest"
-
-The sensor defines a list of dependencies, a few of which contain filters. The filters are explained below. These dependencies are then grouped using `dependencyGroups`.
-
-The significance of `dependencyGroups` is that if you don't define them, the sensor applies an `AND` operation and waits for all events to occur. You may not always want to wait for all the specified events,
-but rather trigger the workflows as soon as a group or groups of event dependencies are satisfied.
-
-To define when to trigger the workflows, `circuit` contains a boolean expression that is evaluated every time an event dependency
-is satisfied. A trigger template can optionally contain a `when` switch that determines when that template is triggered.
-
-In the example, the first template gets triggered when either the `group_1` or `group_2` dependency group is satisfied, the second template gets triggered only when both
-`group_4` and `group_5` are satisfied, and the last template is triggered every time the circuit evaluates to true.
-
-## Execution and Backoff Policy
-
- apiVersion: argoproj.io/v1alpha1
- kind: Sensor
- metadata:
- name: trigger-backoff
- labels:
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
- spec:
- template:
- spec:
- containers:
- - name: "sensor"
- image: "argoproj/sensor"
- imagePullPolicy: Always
- serviceAccountName: argo-events-sa
- dependencies:
- - name: "webhook-gateway-http:foo"
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- # If set to true, marks sensor state as `error` if the previous trigger round fails.
- # Once sensor state is set to `error`, no further triggers will be processed.
- errorOnFailedRound: true
- triggers:
- - template:
- name: trigger-1
- # Policy to configure backoff and execution criteria for the trigger
- # Because the sensor is able to trigger any K8s resource, it determines the resource state by looking at the resource's labels.
- policy:
- # Backoff before checking the resource labels
- backoff:
- # Duration is the duration in nanoseconds
- duration: 1000000000 # 1 second
- # Duration is multiplied by factor each iteration
- factor: 2
- # The amount of jitter applied each iteration
- jitter: 0.1
- # Exit with error after this many steps
- steps: 5
- # the criteria to decide if a resource is in success or failure state.
- # labels set on the resource decide if resource is in success or failed state.
- state:
- # Note: Set either success or failure labels. If you set both, only success labels will be considered.
-
- # Success defines labels required to identify a resource in success state
- success:
- workflows.argoproj.io/phase: Succeeded
- # Failure defines labels required to identify a resource in failed state
- failure:
- workflows.argoproj.io/phase: Failed
- # Determines whether trigger should be marked as failed if the backoff times out and sensor is still unable to decide the state of the trigger.
- # defaults to false
- errorOnBackoffTimeout: true
- group: argoproj.io
- version: v1alpha1
- kind: Workflow
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: webhook-
- spec:
- entrypoint: whalesay
- arguments:
- parameters:
- - name: message
- # this is the value that should be overridden
- value: hello world
- templates:
- - name: whalesay
- inputs:
- parameters:
- - name: message
- container:
- image: docker/whalesay:latest
- command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
- resourceParameters:
- - src:
- event: "webhook-gateway-http:foo"
- dest: spec.arguments.parameters.0.value
- - template:
- name: trigger-2
- policy:
- backoff:
- duration: 1000000000 # 1 second
- factor: 2
- jitter: 0.1
- steps: 5
- state:
- failure:
- workflows.argoproj.io/phase: Failed
- errorOnBackoffTimeout: false
- group: argoproj.io
- version: v1alpha1
- kind: Workflow
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-world-
- spec:
- entrypoint: whalesay
- templates:
- - name: whalesay
- container:
- image: docker/whalesay:latest
- command: [cowsay]
- args: ["hello world"]
-
-A trigger template can contain an execution and backoff policy. Once the trigger is executed from a template, its
-state is determined using the `state` labels. If the labels defined in the `success` criteria are a subset of the labels on the
-resource, the execution is treated as successful, and likewise for the labels defined in the `failure` criteria. Please note that you can
-only define either success or failure criteria, not both.
-
-The `backoff` directs the sensor on when to check the labels of the executed trigger resource. If, after the backoff retries, the sensor is still unable to determine the
-state of the resource, `errorOnBackoffTimeout` controls whether to mark the trigger as failed.
-
-The `errorOnFailedRound` setting, defined outside of triggers, decides whether to set the sensor state to `error` if the previous
-round of trigger execution fails.
-
-## Filters
-You can apply the following filters to an event dependency. Only if the event payload passes the filter will it
-be treated as a valid event.
-
-| Type | Description |
-|----------|-------------------|
-| **Time** | Filters the event based on time constraints |
-| **EventContext** | Filters metadata that provides circumstantial information about the event. |
-| **Data** | Describes constraints and filters for the event payload |
-
-
-
-### Time Filter
-
- filters:
- time:
- start: "2016-05-10T15:04:05Z07:00"
- stop: "2020-01-02T15:04:05Z07:00"
-
-[Example](https://github.com/argoproj/argo-events/blob/master/examples/sensors/time-filter-webhook.yaml)
-
-### EventContext Filter
-
- filters:
- context:
- source:
- host: amazon.com
- contentType: application/json
-
-[Example](https://github.com/argoproj/argo-events/blob/master/examples/sensors/context-filter-webhook.yaml)
-
-### Data filter
-
- filters:
- data:
- - path: bucket
- type: string
- value: argo-workflow-input
-
-[Example](https://github.com/argoproj/argo-events/blob/master/examples/sensors/data-filter-webhook.yaml)
-
-## Examples
-You can find sensor examples [here](https://github.com/argoproj/argo-events/tree/master/examples/sensors)
diff --git a/examples/event-sources/amqp.yaml b/examples/event-sources/amqp.yaml
index 9c4543e553..6840a16d87 100644
--- a/examples/event-sources/amqp.yaml
+++ b/examples/event-sources/amqp.yaml
@@ -1,36 +1,32 @@
-# This configmap contains the event sources configurations for AMQP gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: amqp-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- # no retries if connection to amqp service is not successful
- example-without-retry: |-
- # url of the service
- url: amqp://amqp.argo-events:5672
- # name of the exchange
- exchangeName: "name of the exchange"
- # type of the exchange
- exchangeType: fanout
- # routing key for the exchange
- routingKey: "routing key"
+spec:
+ type: amqp
+ amqp:
+ # no retries if connection to amqp service is not successful
+ example-without-retry:
+ # url of the service
+ url: "amqp://amqp.argo-events:5672"
+ # name of the exchange
+ exchangeName: "name of the exchange"
+ # type of the exchange
+ exchangeType: "fanout"
+ # routing key for the exchange
+ routingKey: "routing key"
- # retry after each backoff to set up a successful connection
- example-with-retry: |-
- url: amqp://amqp.argo-events:5672
- exchangeName: "name of the exchange"
- exchangeType: fanout
- routingKey: "routing key"
- backoff:
- # duration in nanoseconds. following value is 10 seconds
- duration: 10000000000
- # how many backoffs
- steps: 5
- # factor to increase on each step.
- # setting factor > 1 makes backoff exponential.
- factor: 2
+ # retry after each backoff to set up a successful connection
+ example-with-retry:
+ url: "amqp://amqp.argo-events:5672"
+ exchangeName: "name of the exchange"
+ exchangeType: "fanout"
+ routingKey: "routing key"
+ backoff:
+ # duration in nanoseconds. following value is 10 seconds
+ duration: 10000000000
+ # how many backoffs
+ steps: 5
+ # factor to increase on each step.
+ # setting factor > 1 makes backoff exponential.
+ factor: 2
diff --git a/examples/event-sources/artifact.yaml b/examples/event-sources/artifact.yaml
deleted file mode 100644
index 32c214d8b9..0000000000
--- a/examples/event-sources/artifact.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-# This configmap contains the event sources configurations for Artifact gateway
-
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: artifact-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example-1: |-
- # bucket information
- bucket:
- # name of the bucket
- name: input
- # s3 service endpoint
- endpoint: minio-service.argo-events:9000
- # list of events to subscribe to
- # Visit https://docs.minio.io/docs/minio-bucket-notification-guide.html
- events:
- - s3:ObjectCreated:Put
- - s3:ObjectRemoved:Delete
- # Filters to apply on the key
- # Optional
- # e.g. filter for key that starts with "hello-" and ends with ".txt"
- filter:
- prefix: "hello-"
- suffix: ".txt"
- # type of the connection
- insecure: true
- # accessKey refers to K8s secret that stores the access key
- accessKey:
- # Key within the K8s secret whose corresponding value (must be base64 encoded) is access key
- key: accesskey
- # Name of the K8s secret that contains the access key
- name: artifacts-minio
- # secretKey contains information about K8s secret that stores the secret key
- secretKey:
- # Key within the K8s secret whose corresponding value (must be base64 encoded) is secret key
- key: secretkey
- # Name of the K8s secret that contains the secret key
- name: artifacts-minio
-
- example-2 : |-
- bucket:
- name: mybucket
- endpoint: minio-service.argo-events:9000
- events:
- - s3:ObjectCreated:Put
- # no filter
- filter:
- prefix: ""
- suffix: ""
- insecure: true
- accessKey:
- key: accesskey
- name: artifacts-minio
- secretKey:
- key: secretkey
- name: artifacts-minio
diff --git a/examples/event-sources/aws-sns.yaml b/examples/event-sources/aws-sns.yaml
index b45cd2a8ce..95ebc63cdd 100644
--- a/examples/event-sources/aws-sns.yaml
+++ b/examples/event-sources/aws-sns.yaml
@@ -1,68 +1,64 @@
-# This configmap contains the event sources configurations for AWS SNS gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: aws-sns-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example: |-
- # topic arn
- topicArn: "topic-arn"
- # hook contains configuration for the HTTP server running in the gateway.
- # AWS will send events to following port and endpoint
- hook:
- # endpoint to listen events on
- endpoint: "/"
- # port to run HTTP server on
- port: "12000"
- # url the gateway will use to register at AWS.
- # This url must be reachable from outside the cluster.
- # The gateway pod is backed by the service defined in the gateway spec. So get the URL for that service AWS can reach to.
- url: "http://myfakeurl.fake"
- # accessKey contains information about K8s secret that stores the access key
- accessKey:
- # Key within the K8s secret whose corresponding value (must be base64 encoded) is access key
- key: accesskey
- # Name of the K8s secret that contains the access key
- name: aws-secret
- # secretKey contains information about K8s secret that stores the secret key
- secretKey:
- # Key within the K8s secret whose corresponding value (must be base64 encoded) is secret key
- key: secretkey
- # Name of the K8s secret that contains the secret key
- name: aws-secret
- # aws region
- region: "us-east-1"
+spec:
+ type: "sns"
+ sns:
+ example:
+ # topic arn
+ topicArn: "topic-arn"
+      # webhook contains configuration for the HTTP server running in the gateway.
+      # AWS will send events to the following port and endpoint
+ webhook:
+ # endpoint to listen events on
+ endpoint: "/"
+ # port to run HTTP server on
+ port: "12000"
+ # url the gateway will use to register at AWS.
+ # This url must be reachable from outside the cluster.
+ # The gateway pod is backed by the service defined in the gateway spec. So get the URL for that service AWS can reach to.
+ url: "http://myfakeurl.fake"
+ # accessKey contains information about K8s secret that stores the access key
+ accessKey:
+ # Key within the K8s secret whose corresponding value (must be base64 encoded) is access key
+ key: accesskey
+ # Name of the K8s secret that contains the access key
+ name: aws-secret
+ # secretKey contains information about K8s secret that stores the secret key
+ secretKey:
+ # Key within the K8s secret whose corresponding value (must be base64 encoded) is secret key
+ key: secretkey
+ # Name of the K8s secret that contains the secret key
+ name: aws-secret
+ # aws region
+ region: "us-east-1"
- example-with-secure-connection: |-
- topicArn: "topic-arn"
- hook:
- endpoint: "/"
- # gateway can run multiple HTTP servers, just define a unique port.
- port: "13000"
- url: "http://mysecondfakeurl.fake"
- # path to file that is mounted in gateway pod which contains certs
- serverCertPath: "some path in pod"
- # path to file that is mounted in gateway pod which contains private key
- serverKeyPath: "some path in pod"
- accessKey:
- name: aws-secret
- key: access
- secretKey:
- name: aws-secret
- key: secret
- region: "us-east-1"
+ example-with-secure-connection:
+ topicArn: "topic-arn"
+ webhook:
+ endpoint: "/"
+ # gateway can run multiple HTTP servers, just define a unique port.
+ port: "13000"
+ url: "http://mysecondfakeurl.fake"
+ # path to file that is mounted in gateway pod which contains certs
+ serverCertPath: "some path in pod"
+ # path to file that is mounted in gateway pod which contains private key
+ serverKeyPath: "some path in pod"
+ accessKey:
+ name: aws-secret
+ key: access
+ secretKey:
+ name: aws-secret
+ key: secret
+ region: "us-east-1"
- example-without-credentials: |-
- # If AWS access credentials are already present on the Pod's IAM role running the Gateway,
- # the AWS session will utilize the existing config and hence we do not need to provide explicit credentials.
- topicArn: "topic-arn"
- hook:
- endpoint: "/"
- port: "13000"
- url: "http://mysecondfakeurl.fake"
- region: "us-east-1"
+ example-without-credentials:
+ # If AWS access credentials are already present on the Pod's IAM role running the Gateway,
+ # the AWS session will utilize the existing config and hence we do not need to provide explicit credentials.
+ topicArn: "topic-arn"
+ webhook:
+ endpoint: "/"
+ port: "13000"
+ url: "http://mysecondfakeurl.fake"
+ region: "us-east-1"
diff --git a/examples/event-sources/aws-sqs.yaml b/examples/event-sources/aws-sqs.yaml
index 72c2470d39..95f46c3631 100644
--- a/examples/event-sources/aws-sqs.yaml
+++ b/examples/event-sources/aws-sqs.yaml
@@ -1,47 +1,34 @@
-# This configmap contains the event sources configurations for AWS SQS gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: aws-sqs-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example-1: |-
- # accessKey contains information about K8s secret that stores the access key
- accessKey:
- # Key within the K8s secret whose corresponding value (must be base64 encoded) is access key
- key: accesskey
- # Name of the K8s secret that contains the access key
- name: aws-secret
- # secretKey contains information about K8s secret that stores the secret key
- secretKey:
- # Key within the K8s secret whose corresponding value (must be base64 encoded) is secret key
- key: secretkey
- # Name of the K8s secret that contains the secret key
- name: aws-secret
- # aws region
- region: "us-east-1"
- # name of the queue. The gateway resolves the url of the queue from the queue name.
- queue: "my-fake-queue-1"
- # The duration (in seconds) for which the call waits for a message to arrive in the queue before returning.
- # MUST BE > 0 AND <= 20
- waitTimeSeconds: 20
-
- example-2: |-
- accessKey:
- key: accesskey
- name: aws-secret
- secretKey:
- key: secretkey
- name: aws-secret
- region: "us-east-1"
- queue: "my-fake-queue-2"
- waitTimeSeconds: 20
+spec:
+ type: "sqs"
+  sqs:
+ example:
+ # accessKey contains information about K8s secret that stores the access key
+ accessKey:
+ # Key within the K8s secret whose corresponding value (must be base64 encoded) is access key
+ key: accesskey
+ # Name of the K8s secret that contains the access key
+ name: aws-secret
+ # secretKey contains information about K8s secret that stores the secret key
+ secretKey:
+ # Key within the K8s secret whose corresponding value (must be base64 encoded) is secret key
+ key: secretkey
+ # Name of the K8s secret that contains the secret key
+ name: aws-secret
+ # aws region
+ region: "us-east-1"
+ # name of the queue. The gateway resolves the url of the queue from the queue name.
+ queue: "my-fake-queue-1"
+ # The duration (in seconds) for which the call waits for a message to arrive in the queue before returning.
+ # MUST BE > 0 AND <= 20
+ waitTimeSeconds: 20
- example-3: |-
- region: "us-east-1"
- queue: "my-fake-queue-2"
- waitTimeSeconds: 20
+ example-without-credentials:
+ # If AWS access credentials are already present on the Pod's IAM role running the Gateway,
+ # the AWS session will utilize the existing config and hence we do not need to provide explicit credentials.
+ region: "us-east-1"
+ queue: "my-fake-queue-2"
+ waitTimeSeconds: 20
diff --git a/examples/event-sources/calendar.yaml b/examples/event-sources/calendar.yaml
index 3f819389e5..8d376526bf 100644
--- a/examples/event-sources/calendar.yaml
+++ b/examples/event-sources/calendar.yaml
@@ -1,44 +1,37 @@
-# This configmap contains the event sources configurations for Calendar gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
- name: calendar-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example-with-interval: |-
- # creates an event every 10 seconds
- interval: 10s
+ name: calendar-source
+spec:
+ type: "calendar"
+ calendar:
+ example-with-interval:
+ # creates an event every 10 seconds
+ interval: "10s"
- example-with-schedule: |-
- # schedules an event at 30 minutes past each hour
- schedule: "30 * * * *"
+ example-with-schedule:
+ # schedules an event at 30 minutes past each hour
+ schedule: "30 * * * *"
- schedule-with-static-user-payload: |-
- schedule: "30 * * * *"
- # userPayload is a static string that will be send to the the sensor with each event payload
- # whatever you put here is blindly delivered to sensor.
- userPayload: "{\"hello\": \"world\"}"
+ schedule-with-static-user-payload:
+ schedule: "30 * * * *"
+      # userPayload is a static string that will be sent to the sensor with each event payload
+ # whatever you put here is blindly delivered to sensor.
+ userPayload: "{\"hello\": \"world\"}"
- schedule-in-specific-timezone: |-
- # creates an event every 20 seconds
- interval: 20s
- # userPayload is a static string that will be send to the the sensor with each event payload
- # whatever you put here is blindly delivered to sensor.
- userPayload: "{\"hello\": \"world\"}"
- # timezone
- # more info: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
- timezone: "America/New_York"
+ schedule-in-specific-timezone:
+ # creates an event every 20 seconds
+ interval: "20s"
+      # userPayload is a static string that will be sent to the sensor with each event payload
+ # whatever you put here is blindly delivered to sensor.
+ userPayload: "{\"hello\": \"world\"}"
+ # timezone
+ # more info: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+ timezone: "America/New_York"
- schedule-with-exclusion-dates: |-
- schedule: "30 * * * *"
- # more info https://icalendar.org/iCalendar-RFC-5545/3-8-5-3-recurrence-rule.html
- # only exclusion dates are supported
- # year, month and day are matched
- recurrence:
- - "EXDATE:20190102T150405Z"
- - "EXDATE:20190602T160210Z"
- timezone: "America/New_York"
+ schedule-with-exclusion-dates:
+ schedule: "30 * * * *"
+ # year, month and day are matched
+ exclusionDates:
+ - "EXDATE:20190102T150405Z"
+ - "EXDATE:20190602T160210Z"
diff --git a/examples/event-sources/file.yaml b/examples/event-sources/file.yaml
index 1f9d0bf2bd..fd3e486273 100644
--- a/examples/event-sources/file.yaml
+++ b/examples/event-sources/file.yaml
@@ -1,25 +1,23 @@
-# This configmap contains the event sources configurations for File gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
- name: file-configmap
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example-with-path: |-
- # directory to watch
- directory: "/bin/"
- # type of the event
- # supported types are: CREATE, WRITE, REMOVE, RENAME, CHMOD
- type: CREATE
- # path to watch
- path: x.txt
+ name: file-event-source
+spec:
+ type: file
+ file:
+ example-with-path:
+ watchPathConfig:
+ # directory to watch
+ directory: "/bin/"
+ # path to watch
+ path: "x.txt"
+ # type of the event
+ # supported types are: CREATE, WRITE, REMOVE, RENAME, CHMOD
+ eventType: "CREATE"
- example-with-path-regex: |-
- directory: "/bin/"
- type: CREATE
- # the gateway will watch events for path that matches following regex
- pathRegexp: "([a-z]+).txt"
+ example-with-path-regex:
+ watchPathConfig:
+ directory: "/bin/"
+ # the gateway will watch events for path that matches following regex
+ pathRegexp: "([a-z]+).txt"
+ eventType: "CREATE"
diff --git a/examples/event-sources/gcp-pubsub.yaml b/examples/event-sources/gcp-pubsub.yaml
index acdf5faf18..a0029d2e41 100644
--- a/examples/event-sources/gcp-pubsub.yaml
+++ b/examples/event-sources/gcp-pubsub.yaml
@@ -1,21 +1,17 @@
-# This configmap contains the event sources configurations for GCP PubSub gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: gcp-pubsub-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example-1: |-
- # id of your project
- projectID: "my-fake-project-id"
- # (optional) id of project for topic, same as projectID by default
- # topicProjectID: "my-fake-topic-project-id"
- # topic name
- topic: "my-fake-topic"
- # Refers to the credential file that is mounted in the gateway pod.
- # ./validate.go is just a placeholder to make tests pass. Please place the path to actual credentials file :)
- credentialsFile: "./validate.go"
+spec:
+ type: "pubsub"
+ pubsub:
+ example-event-source:
+ # id of your project
+ projectID: "my-fake-project-id"
+ # (optional) id of project for topic, same as projectID by default
+ # topicProjectID: "my-fake-topic-project-id"
+ # topic name
+ topic: "my-fake-topic"
+ # Refers to the credential file that is mounted in the gateway pod.
+ # ./validate.go is just a placeholder to make tests pass. Please place the path to actual credentials file :)
+ credentialsFile: "./validate.go"
diff --git a/examples/event-sources/github.yaml b/examples/event-sources/github.yaml
index 9d6aa98409..8625718660 100644
--- a/examples/event-sources/github.yaml
+++ b/examples/event-sources/github.yaml
@@ -1,74 +1,70 @@
-# This configmap contains the event sources configurations for Github gateway
-# More info: https://developer.github.com/v3/repos/hooks/#create-a-hook
-
----
-apiVersion: v1
-kind: ConfigMap
+# Info on GitHub Webhook: https://developer.github.com/v3/repos/hooks/#create-a-hook
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: github-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example: |-
- # owner of the repo
- owner: "argoproj"
- # repository name
- repository: "argo-events"
- # Github will send events to following port and endpoint
- hook:
- # endpoint to listen to events on
- endpoint: "/push"
- # port to run internal HTTP server on
- port: "12000"
- # url the gateway will use to register at Github.
- # This url must be reachable from outside the cluster.
- # The gateway pod is backed by the service defined in the gateway spec. So get the URL for that service Github can reach to.
- url: "http://myfakeurl.fake"
- # type of events to listen to.
- # following listens to everything, hence *
- # You can find more info on https://developer.github.com/v3/activity/events/types/
- events:
- - "*"
- # apiToken refers to K8s secret that stores the github api token
- apiToken:
- # Name of the K8s secret that contains the access token
- name: github-access
- # Key within the K8s secret whose corresponding value (must be base64 encoded) is access token
- key: token
- # webHookSecret refers to K8s secret that stores the github hook secret
- webHookSecret:
- # Name of the K8s secret that contains the hook secret
- name: github-access
- # Key within the K8s secret whose corresponding value (must be base64 encoded) is hook secret
- key: secret
- # type of the connection between gateway and Github
- insecure: false
- # Determines if notifications are sent when the webhook is triggered
- active: true
- # The media type used to serialize the payloads
- contentType: "json"
+spec:
+ type: "github"
+ github:
+ example:
+ # owner of the repo
+ owner: "argoproj"
+ # repository name
+ repository: "argo-events"
+      # Github will send events to the following port and endpoint
+ webhook:
+ # endpoint to listen to events on
+ endpoint: "/push"
+ # port to run internal HTTP server on
+ port: "12000"
+ # url the gateway will use to register at Github.
+ # This url must be reachable from outside the cluster.
+ # The gateway pod is backed by the service defined in the gateway spec. So get the URL for that service Github can reach to.
+ url: "http://myfakeurl.fake"
+ # type of events to listen to.
+ # following listens to everything, hence *
+ # You can find more info on https://developer.github.com/v3/activity/events/types/
+ events:
+ - "*"
+ # apiToken refers to K8s secret that stores the github api token
+ apiToken:
+ # Name of the K8s secret that contains the access token
+ name: github-access
+ # Key within the K8s secret whose corresponding value (must be base64 encoded) is access token
+ key: token
+ # webHookSecret refers to K8s secret that stores the github hook secret
+ webHookSecret:
+ # Name of the K8s secret that contains the hook secret
+ name: github-access
+ # Key within the K8s secret whose corresponding value (must be base64 encoded) is hook secret
+ key: secret
+ # type of the connection between gateway and Github
+ insecure: false
+ # Determines if notifications are sent when the webhook is triggered
+ active: true
+ # The media type used to serialize the payloads
+ contentType: "json"
- example-with-secure-connection: |-
- owner: "argoproj"
- repository: "argo"
- hook:
- endpoint: "/push"
- port: "13000"
- url: "http://myargofakeurl.fake"
- # path to file that is mounted in gateway pod which contains certs
- serverCertPath: "some path in pod"
- # path to file that is mounted in gateway pod which contains private key
- serverKeyPath: "some path in pod"
- events:
- - "push"
- - "delete"
- apiToken:
- name: github-access
- key: token
- webHookSecret:
- name: github-access
- key: secret
- insecure: true
- active: true
- contentType: "json"
+ example-with-secure-connection:
+ owner: "argoproj"
+ repository: "argo"
+ webhook:
+ endpoint: "/push"
+ port: "13000"
+ url: "http://myargofakeurl.fake"
+ # path to file that is mounted in gateway pod which contains certs
+ serverCertPath: "some path in pod"
+ # path to file that is mounted in gateway pod which contains private key
+ serverKeyPath: "some path in pod"
+ events:
+ - "push"
+ - "delete"
+ apiToken:
+ name: github-access
+ key: token
+ webHookSecret:
+ name: github-access
+ key: secret
+ insecure: true
+ active: true
+ contentType: "json"
diff --git a/examples/event-sources/gitlab.yaml b/examples/event-sources/gitlab.yaml
index 9cc3655c8d..ee124383a8 100644
--- a/examples/event-sources/gitlab.yaml
+++ b/examples/event-sources/gitlab.yaml
@@ -1,55 +1,51 @@
-# This configmap contains the event sources configurations for Gitlab gateway
-# More info: https://docs.gitlab.com/ce/api/projects.html#add-project-hook
-
----
-apiVersion: v1
-kind: ConfigMap
+# More info on GitLab project hooks: https://docs.gitlab.com/ce/api/projects.html#add-project-hook
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: gitlab-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example: |-
- # id of the project
- projectId: "1"
- # Github will send events to following port and endpoint
- hook:
- # endpoint to listen to events on
- endpoint: "/push"
- # port to run internal HTTP server on
- port: "12000"
- # url the gateway will use to register at Github.
- # This url must be reachable from outside the cluster.
- # The gateway pod is backed by the service defined in the gateway spec. So get the URL for that service Github can reach to.
- url: "http://myfakeurl.fake"
- # event to listen to
- # Visit https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#events
- event: "PushEvents"
- # accessToken refers to K8s secret that stores the gitlab api token
- accessToken:
- # Key within the K8s secret whose corresponding value (must be base64 encoded) is access token
- key: accesskey
- # Name of the K8s secret that contains the access token
- name: gitlab-access
- # Do SSL verification when triggering the hook
- enableSSLVerification: false
- # Gitlab Base url
- gitlabBaseUrl: "YOUR_GITLAB_URL"
+spec:
+ type: "gitlab"
+ gitlab:
+ example:
+ # id of the project
+ projectId: "1"
+      # Gitlab will send events to the following port and endpoint
+ webhook:
+ # endpoint to listen to events on
+ endpoint: "/push"
+ # port to run internal HTTP server on
+ port: "12000"
+        # url the gateway will use to register at Gitlab.
+        # This url must be reachable from outside the cluster.
+        # The gateway pod is backed by the service defined in the gateway spec. So get the URL for that service that Gitlab can reach.
+ url: "http://myfakeurl.fake"
+ # event to listen to
+ # Visit https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#events
+ event: "PushEvents"
+ # accessToken refers to K8s secret that stores the gitlab api token
+ accessToken:
+ # Key within the K8s secret whose corresponding value (must be base64 encoded) is access token
+ key: accesskey
+ # Name of the K8s secret that contains the access token
+ name: gitlab-access
+ # Do SSL verification when triggering the hook
+ enableSSLVerification: false
+ # Gitlab Base url
+ gitlabBaseUrl: "YOUR_GITLAB_URL"
- example-secure: |-
- projectId: "2"
- hook:
- endpoint: "/push"
- port: "13000"
- url: "http://mysecondfakeurl.fake"
- # path to file that is mounted in gateway pod which contains certs
- serverCertPath: "some path in pod"
- # path to file that is mounted in gateway pod which contains private key
- serverKeyPath: "some path in pod"
- event: "PushEvents"
- accessToken:
- key: accesskey
- name: gitlab-access
- enableSSLVerification: true
- gitlabBaseUrl: "YOUR_GITLAB_URL"
+ example-secure:
+ projectId: "2"
+ webhook:
+ endpoint: "/push"
+ port: "13000"
+ url: "http://mysecondfakeurl.fake"
+ # path to file that is mounted in gateway pod which contains certs
+ serverCertPath: "some path in pod"
+ # path to file that is mounted in gateway pod which contains private key
+ serverKeyPath: "some path in pod"
+ event: "PushEvents"
+ accessToken:
+ key: accesskey
+ name: gitlab-access
+ enableSSLVerification: true
+ gitlabBaseUrl: "YOUR_GITLAB_URL"
diff --git a/examples/event-sources/hdfs.yaml b/examples/event-sources/hdfs.yaml
index e170f8e4f8..fe0677d023 100644
--- a/examples/event-sources/hdfs.yaml
+++ b/examples/event-sources/hdfs.yaml
@@ -1,31 +1,27 @@
-# This configmap contains the event sources configurations for HDFS gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
- name: hdfs-gateway-configmap
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example-1: |-
- directory: "/tmp/"
- type: "CREATE"
- path: x.txt
- addresses:
- - my-hdfs-namenode-0.my-hdfs-namenode.default.svc.cluster.local:8020
- - my-hdfs-namenode-1.my-hdfs-namenode.default.svc.cluster.local:8020
- hdfsUser: root
- # krbCCacheSecret:
- # name: krb
- # key: krb5cc_0
- # krbKeytabSecret:
- # name: krb
- # key: user1.keytab
- # krbUsername: "user1"
- # krbRealm: "MYCOMPANY.COM"
- # krbConfigConfigMap:
- # name: my-hdfs-krb5-config
- # key: krb5.conf
- # krbServicePrincipalName: hdfs/_HOST
+ name: hdfs-event-source
+spec:
+ type: "hdfs"
+ hdfs:
+ example:
+ directory: "/tmp/"
+ type: "CREATE"
+ path: x.txt
+ addresses:
+ - my-hdfs-namenode-0.my-hdfs-namenode.default.svc.cluster.local:8020
+ - my-hdfs-namenode-1.my-hdfs-namenode.default.svc.cluster.local:8020
+ hdfsUser: root
+ # krbCCacheSecret:
+ # name: krb
+ # key: krb5cc_0
+ # krbKeytabSecret:
+ # name: krb
+ # key: user1.keytab
+ # krbUsername: "user1"
+ # krbRealm: "MYCOMPANY.COM"
+ # krbConfigConfigMap:
+ # name: my-hdfs-krb5-config
+ # key: krb5.conf
+ # krbServicePrincipalName: hdfs/_HOST
diff --git a/examples/event-sources/kafka.yaml b/examples/event-sources/kafka.yaml
index c26cf181b6..b09443008d 100644
--- a/examples/event-sources/kafka.yaml
+++ b/examples/event-sources/kafka.yaml
@@ -1,33 +1,29 @@
-# This configmap contains the event sources configurations for Kafka gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: kafka-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- # no retries if connection to kafka service is not successful
- example-without-retry: |-
- # url of the service
- url: kafka.argo-events:9092
- # name of the topic
- topic: "topic-1"
- # partition number
- partition: "0"
+spec:
+ type: "kafka"
+ kafka:
+ # no retries if connection to kafka service is not successful
+ example-without-retry:
+ # url of the service
+ url: "kafka.argo-events:9092"
+ # name of the topic
+ topic: "topic-1"
+ # partition number
+ partition: "0"
- # retry after each backoff to set up a successful connection
- example-with-retry: |-
- url: kafka.argo-events:9092
- topic: "topic-2"
- partition: "1"
- backoff:
- # duration in nanoseconds. following value is 10 seconds
- duration: 10000000000
- # how many backoffs
- steps: 5
- # factor to increase on each step.
- # setting factor > 1 makes backoff exponential.
- factor: 2
+ # retry with the backoff policy below until a connection is established
+ example-with-retry:
+ url: "kafka.argo-events:9092"
+ topic: "topic-2"
+ partition: "1"
+ backoff:
+ # backoff duration in nanoseconds; the value below is 10 seconds
+ duration: 10000000000
+ # how many backoffs
+ steps: 5
+ # factor to increase on each step.
+ # setting factor > 1 makes backoff exponential.
+ factor: 2
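+ # with the values above the successive retry delays grow roughly as 10s, 20s, 40s, 80s and 160s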
diff --git a/examples/event-sources/minio.yaml b/examples/event-sources/minio.yaml
new file mode 100644
index 0000000000..0ee09be9b8
--- /dev/null
+++ b/examples/event-sources/minio.yaml
@@ -0,0 +1,57 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+ name: minio-event-source
+spec:
+ type: "minio"
+ minio:
+ example-with-filter:
+ # bucket information
+ bucket:
+ # name of the bucket
+ name: input
+ # s3 service endpoint
+ endpoint: minio-service.argo-events:9000
+ # list of events to subscribe to
+ # Visit https://docs.minio.io/docs/minio-bucket-notification-guide.html
+ events:
+ - s3:ObjectCreated:Put
+ - s3:ObjectRemoved:Delete
+ # Filters to apply on the key
+ # Optional
+ # e.g. filter for key that starts with "hello-" and ends with ".txt"
+ filter:
+ prefix: "hello-"
+ suffix: ".txt"
+ # whether to use an insecure (non-TLS) connection to the minio server
+ insecure: true
+ # accessKey refers to K8s secret that stores the access key
+ accessKey:
+ # Key within the K8s secret whose corresponding value (must be base64 encoded) is access key
+ key: accesskey
+ # Name of the K8s secret that contains the access key
+ name: artifacts-minio
+ # secretKey contains information about K8s secret that stores the secret key
+ secretKey:
+ # Key within the K8s secret whose corresponding value (must be base64 encoded) is secret key
+ key: secretkey
+ # Name of the K8s secret that contains the secret key
+ name: artifacts-minio
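+ # the artifacts-minio secret referenced above can be created with, for example:
+ # kubectl -n argo-events create secret generic artifacts-minio --from-literal=accesskey=<access-key> --from-literal=secretkey=<secret-key>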
+
+ example-without-filter:
+ bucket:
+ name: mybucket
+ endpoint: minio-service.argo-events:9000
+ events:
+ - s3:ObjectCreated:Put
+ # no filter
+ filter:
+ prefix: ""
+ suffix: ""
+ insecure: true
+ accessKey:
+ key: accesskey
+ name: artifacts-minio
+ secretKey:
+ key: secretkey
+ name: artifacts-minio
diff --git a/examples/event-sources/mqtt.yaml b/examples/event-sources/mqtt.yaml
index 2a6bd63f11..2767db0fef 100644
--- a/examples/event-sources/mqtt.yaml
+++ b/examples/event-sources/mqtt.yaml
@@ -1,34 +1,30 @@
-# This configmap contains the event sources configurations for MQTT gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: mqtt-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- # no retries if connection to mqtt service is not successful
- example-without-retry: |-
- # url of your mqtt service
- url: tcp://mqtt.argo-events:1883
- # topic name
- topic: foo
- # client id
- clientId: 1234
+spec:
+ type: "mqtt"
+ mqtt:
+ # no retries if connection to mqtt service is not successful
+ example-without-retry:
+ # url of your mqtt service
+ url: "tcp://mqtt.argo-events:1883"
+ # topic name
+ topic: "foo"
+ # client id
+ clientId: 1234
- # retry after each backoff to set up a successful connection
- example-with-retry: |-
- url: tcp://mqtt.argo-events:1883
- topic: bar
- # client id
- clientId: 2345
- backoff:
- # duration in nanoseconds. following value is 10 seconds
- duration: 10000000000
- # how many backoffs
- steps: 5
- # factor to increase on each step.
- # setting factor > 1 makes backoff exponential.
- factor: 2
+ # retry with the backoff policy below until a connection is established
+ example-with-retry:
+ url: "tcp://mqtt.argo-events:1883"
+ topic: "bar"
+ # client id
+ clientId: 2345
+ backoff:
+ # backoff duration in nanoseconds; the value below is 10 seconds
+ duration: 10000000000
+ # how many backoffs
+ steps: 5
+ # factor to increase on each step.
+ # setting factor > 1 makes backoff exponential.
+ factor: 2
diff --git a/examples/event-sources/nats.yaml b/examples/event-sources/nats.yaml
index b869ec5e9a..2f0884cf16 100644
--- a/examples/event-sources/nats.yaml
+++ b/examples/event-sources/nats.yaml
@@ -1,32 +1,28 @@
-# This configmap contains the event sources configurations for NATS gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: nats-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- # no retries if connection to nats service is not successful
- example-without-retry: |-
- # url of the nats service
- url: nats://nats.argo-events:4222
- # subject name
- subject: foo
+spec:
+ type: "nats"
+ nats:
+ # no retries if connection to nats service is not successful
+ example-without-retry:
+ # url of the nats service
+ url: "nats://nats.argo-events:4222"
+ # subject name
+ subject: "foo"
- # retry after each backoff to set up a successful connection
- example-with-retry: |-
- # url of the nats service
- url: nats://nats.argo-events:4222
- # subject name
- subject: foo
- backoff:
- # duration in nanoseconds. following value is 10 seconds
- duration: 10000000000
- # how many backoffs
- steps: 5
- # factor to increase on each step.
- # setting factor > 1 makes backoff exponential.
- factor: 2
+ # retry with the backoff policy below until a connection is established
+ example-with-retry:
+ # url of the nats service
+ url: "nats://nats.argo-events:4222"
+ # subject name
+ subject: "foo"
+ backoff:
+ # backoff duration in nanoseconds; the value below is 10 seconds
+ duration: 10000000000
+ # how many backoffs
+ steps: 5
+ # factor to increase on each step.
+ # setting factor > 1 makes backoff exponential.
+ factor: 2
diff --git a/examples/event-sources/resource.yaml b/examples/event-sources/resource.yaml
index 5cecf018ff..251df1d32a 100644
--- a/examples/event-sources/resource.yaml
+++ b/examples/event-sources/resource.yaml
@@ -1,81 +1,77 @@
-# This configmap contains the event sources configurations for Resource gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: resource-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- # watch workflows that are in successful state
- example: |-
- # namespace to listen events within
- namespace: argo-events
- # resource group
- group: "argoproj.io"
- # resource version
- version: "v1alpha1"
- # resource kind
- resource: "workflows"
- # type of event
- # possible values are ADD, DELETE, UPDATE
- # Optional
- type: ADD
- # Filters to apply on watched object
- # Optional
- filter:
- labels:
- workflows.argoproj.io/phase: Succeeded
- name: "my-workflow"
+spec:
+ type: "resource"
+ resource:
+ # watch workflows that are in successful state
+ example:
+ # namespace to listen events within
+ namespace: "argo-events"
+ # resource group
+ group: "argoproj.io"
+ # resource version
+ version: "v1alpha1"
+ # resource kind
+ resource: "workflows"
+ # type of event
+ # possible values are ADD, DELETE, UPDATE
+ # Optional
+ type: ADD
+ # Filters to apply on watched object
+ # Optional
+ filter:
+ labels:
+ workflows.argoproj.io/phase: Succeeded
+ name: "my-workflow"
- # watch all namespace related events
- example-with-all-types-and-no-filter: |-
- namespace: argo-events
- group: ""
- version: "v1"
- resource: "namespaces"
+ # watch all namespace related events
+ example-with-all-types-and-no-filter:
+ namespace: "argo-events"
+ group: ""
+ version: "v1"
+ resource: "namespaces"
- # create event if workflow with prefix "my-workflow" gets modified
- example-with-prefix-filter: |-
- namespace: argo-events
- group: argoproj.io
- version: v1alpha1
- resource: workflows
- type: MODIFIED
- filter:
- prefix: "my-workflow"
+ # create event if workflow with prefix "my-workflow" gets modified
+ example-with-prefix-filter:
+ namespace: "argo-events"
+ group: argoproj.io
+ version: v1alpha1
+ resource: workflows
+ type: MODIFIED
+ filter:
+ prefix: "my-workflow"
- # create event when a pod is created before 2019-03-27T010:52:32Z
- example-with-created-by-filter: |-
- namespace: argo-events
- group: argoproj.io
- version: v1alpha1
- resource: workflows
- type: ADDED
- filter:
- createdBy: "2019-04-06T12:52:11Z"
+ # create an event when a workflow is created before 2019-04-06T12:52:11Z
+ example-with-created-by-filter:
+ namespace: "argo-events"
+ group: argoproj.io
+ version: v1alpha1
+ resource: workflows
+ type: ADDED
+ filter:
+ createdBy: "2019-04-06T12:52:11Z"
- example-with-multi-filters: |-
- namespace: argo-events
- group: ""
- version: v1
- resource: pods
- type: ADDED
- filter:
- createdBy: "2019-04-06T12:52:11Z"
- labels:
- workflows.argoproj.io/completed: "true"
- prefix: "hello"
+ example-with-multi-filters:
+ namespace: "argo-events"
+ group: ""
+ version: v1
+ resource: pods
+ type: ADDED
+ filter:
+ createdBy: "2019-04-06T12:52:11Z"
+ labels:
+ workflows.argoproj.io/completed: "true"
+ prefix: "hello"
- # watch for completed workflows in any namespace
- example-without-namespace: |-
- # namespace: (omitted to match any namespace)
- group: "k8s.io"
- version: v1
- resource: workflows
- type: ADDED
- filter:
- labels:
- workflows.argoproj.io/completed: "true"
+ # watch for completed workflows in any namespace
+ example-without-namespace:
+ # namespace: (omitted to match any namespace)
+ group: "k8s.io"
+ version: v1
+ resource: workflows
+ type: ADDED
+ filter:
+ labels:
+ workflows.argoproj.io/completed: "true"
diff --git a/examples/event-sources/slack.yaml b/examples/event-sources/slack.yaml
index f06a3d53cb..3b40719410 100644
--- a/examples/event-sources/slack.yaml
+++ b/examples/event-sources/slack.yaml
@@ -1,54 +1,48 @@
-# This configmap contains the event sources configurations for Slack gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: slack-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example-1: |-
- # hook contains configuration for the HTTP server running in the gateway.
- # Slack will send events to following port and endpoint
- hook:
- # endpoint to listen events on
- endpoint: "/"
- # port to run HTTP server on
- port: "12000"
- # token contains information about K8s secret that stores the token
- token:
- # Name of the K8s secret that contains the token
- name: slack-secret
- # Key within the K8s secret whose corresponding value (must be base64 encoded) is token
- key: tokenkey
- # signingSecret contains information about the K8s secret that stores
- # Slack Signing Secret used to sign every request from Slack
- signingSecret:
- # Name of the K8s secret that contains the signingSecret
- name: slack-secret
- # Key within the K8s secret whose corresponding value contains the
- # base64-encoded Slack signing secret
- key: signingSecret
-
- example-2: |-
- hook:
- endpoint: "/"
- port: "13000"
- token:
- name: slack-secret-2
- key: tokenkey
+spec:
+ type: "slack"
+ slack:
+ example-insecure:
+ # webhook contains the configuration for the HTTP server running in the gateway.
+ # Slack will send events to the following port and endpoint
+ webhook:
+ # endpoint to listen events on
+ endpoint: "/"
+ # port to run HTTP server on
+ port: "12000"
+ # token contains information about K8s secret that stores the token
+ token:
+ # Name of the K8s secret that contains the token
+ name: "slack-secret"
+ # Key within the K8s secret whose corresponding value (must be base64 encoded) is token
+ key: tokenkey
+ # signingSecret contains information about the K8s secret that stores
+ # Slack Signing Secret used to sign every request from Slack
+ signingSecret:
+ # Name of the K8s secret that contains the signingSecret
+ name: "slack-secret"
+ # Key within the K8s secret whose corresponding value contains the
+ # base64-encoded Slack signing secret
+ key: signingSecret
- # with secure connection
- example-3: |-
- hook:
- endpoint: "/"
- port: "14000"
- # path to file that is mounted in gateway pod which contains certs
- serverCertPath: "some path in pod"
- # path to file that is mounted in gateway pod which contains private key
- serverKeyPath: "some path in pod"
- token:
- name: slack-secret-3
- key: tokenkey
+ # with secure connection
+ example-secure:
+ webhook:
+ endpoint: "/"
+ port: "14000"
+ # path to file that is mounted in gateway pod which contains certs
+ serverCertPath: "some path in pod"
+ # path to file that is mounted in gateway pod which contains private key
+ serverKeyPath: "some path in pod"
+ token:
+ name: "slack-secret"
+ key: tokenkey
+ signingSecret:
+ # Name of the K8s secret that contains the signingSecret
+ name: "slack-secret"
+ # Key within the K8s secret whose corresponding value contains the
+ # base64-encoded Slack signing secret
+ key: signingSecret
diff --git a/examples/event-sources/storage-grid.yaml b/examples/event-sources/storage-grid.yaml
index d21f41b35d..74a45694ba 100644
--- a/examples/event-sources/storage-grid.yaml
+++ b/examples/event-sources/storage-grid.yaml
@@ -1,38 +1,34 @@
-# This configmap contains the event sources configurations for StorageGrid gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: storage-grid-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- example: |-
- # hook contains configuration for the HTTP server running in the gateway.
- # StorageGrid will send events to following port and endpoint
- hook:
- # port to run HTTP server on
- port: "8080"
- # endpoint to listen events on
- endpoint: "/"
- # List of supported events can be derived from AWS S3 events https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#supported-notification-event-types
- # Remove S3 prefix from event type to make it a StorageGrid event.
- events:
- - "ObjectCreated:Put"
+spec:
+ type: "storage-grid"
+ storageGrid:
+ example-insecure:
+ # webhook contains the configuration for the HTTP server running in the gateway.
+ # StorageGrid will send events to the following port and endpoint
+ webhook:
+ # port to run HTTP server on
+ port: "8080"
+ # endpoint to listen events on
+ endpoint: "/"
+ # List of supported events can be derived from AWS S3 events https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#supported-notification-event-types
+ # Remove S3 prefix from event type to make it a StorageGrid event.
+ events:
+ - "ObjectCreated:Put"
- example-secure: |-
- hook:
- # port to run HTTP server on
- port: "8090"
- # endpoint to listen events on
- endpoint: "/"
- # path to file that is mounted in gateway pod which contains certs
- serverCertPath: "some path in pod"
- # path to file that is mounted in gateway pod which contains private key
- serverKeyPath: "some path in pod"
- # for events object PUT, POST, COPY and object removal
- events:
- - "ObjectCreated:*"
- - "ObjectRemoved:Delete"
+ example-secure:
+ webhook:
+ # port to run HTTP server on
+ port: "8090"
+ # endpoint to listen events on
+ endpoint: "/"
+ # path to file that is mounted in gateway pod which contains certs
+ serverCertPath: "some path in pod"
+ # path to file that is mounted in gateway pod which contains private key
+ serverKeyPath: "some path in pod"
+ # for events object PUT, POST, COPY and object removal
+ events:
+ - "ObjectCreated:*"
+ - "ObjectRemoved:Delete"
diff --git a/examples/event-sources/webhook.yaml b/examples/event-sources/webhook.yaml
index 65bb52f771..955638c85e 100644
--- a/examples/event-sources/webhook.yaml
+++ b/examples/event-sources/webhook.yaml
@@ -1,41 +1,25 @@
-# This configmap contains the event sources configurations for Webhook gateway
-
----
-apiVersion: v1
-kind: ConfigMap
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
metadata:
name: webhook-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- # gateway can run multiple HTTP servers. Simply define a unique port to start a new HTTP server
-
- example: |-
- # port to run HTTP server on
- port: "12000"
- # endpoint to listen to
- endpoint: "/example"
- # HTTP request method to allow. In this case, only POST requests are accepted
- method: "POST"
-
- example-secure: |-
- port: "13000"
- endpoint: "/secure"
- method: "POST"
- # path to file that is mounted in gateway pod which contains certs
- serverCertPath: "/bin/webhook-secure/crt"
- # path to file that is mounted in gateway pod which contains private key
- serverKeyPath: "/bin/webhook-secure/key"
-
- # example 3 and 4 shows how you can add multiple endpoints on same HTTP server
-
- example-3: |-
- port: "14000"
- endpoint: "/example3"
- method: "PUT"
+spec:
+ type: "webhook"
+ webhook:
+ # the gateway can run multiple HTTP servers; define a unique port to start a new one
+ example:
+ # port to run HTTP server on
+ port: "12000"
+ # endpoint to listen to
+ endpoint: "/example"
+ # HTTP request method to allow. In this case, only POST requests are accepted
+ method: "POST"
- example-4: |-
- port: "14000"
- endpoint: "/example4"
- method: "POST"
+# Uncomment to use secure webhook
+# example-secure:
+# port: "13000"
+# endpoint: "/secure"
+# method: "POST"
+# # path to file that is mounted in gateway pod which contains certs
+# serverCertPath: "/bin/webhook-secure/crt"
+# # path to file that is mounted in gateway pod which contains private key
+# serverKeyPath: "/bin/webhook-secure/key"
diff --git a/examples/gateways/amqp.yaml b/examples/gateways/amqp.yaml
index 63148b17f4..e3bdf7b62b 100644
--- a/examples/gateways/amqp.yaml
+++ b/examples/gateways/amqp.yaml
@@ -5,14 +5,16 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
# type of the gateway
type: "amqp"
# event source configmap name
- eventSource: "amqp-event-source"
+ eventSourceRef:
+ name: "amqp-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
+
# port of the gateway server to send event source configuration to.
# you can configure it to any open port
processorPort: "9330"
@@ -42,4 +44,4 @@ spec:
# sensors to send events to
watchers:
sensors:
- - name: "amqp-sensor"
+ - name: "amqp-sensor"
diff --git a/examples/gateways/aws-sns.yaml b/examples/gateways/aws-sns.yaml
index 851ffaac3a..98e08a8a33 100644
--- a/examples/gateways/aws-sns.yaml
+++ b/examples/gateways/aws-sns.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
- type: "aws-sns"
- eventSource: "aws-sns-event-source"
+ replica: 1
+ type: "sns"
+ eventSourceRef:
+ name: "aws-sns-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/aws-sqs.yaml b/examples/gateways/aws-sqs.yaml
index 6338e259a2..5c853dd9e1 100644
--- a/examples/gateways/aws-sqs.yaml
+++ b/examples/gateways/aws-sqs.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
- type: "aws-sqs"
- eventSource: "aws-sqs-event-source"
+ replica: 1
+ type: "sqs"
+ eventSourceRef:
+ name: "aws-sqs-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/calendar.yaml b/examples/gateways/calendar.yaml
index d9002d9b85..3ce80748c6 100644
--- a/examples/gateways/calendar.yaml
+++ b/examples/gateways/calendar.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "calendar"
- eventSource: "calendar-event-source"
+ eventSourceRef:
+ name: "calendar-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/file.yaml b/examples/gateways/file.yaml
index 34cf1eec84..03b375f19d 100644
--- a/examples/gateways/file.yaml
+++ b/examples/gateways/file.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "file"
- eventSource: "file-event-source"
+ eventSourceRef:
+ name: "file-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/gcp-pubsub.yaml b/examples/gateways/gcp-pubsub.yaml
index c6a6083438..510c856d70 100644
--- a/examples/gateways/gcp-pubsub.yaml
+++ b/examples/gateways/gcp-pubsub.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "gcp-pubsub"
- eventSource: "gcp-pubsub-event-source"
+ eventSourceRef:
+ name: "gcp-pubsub-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/github.yaml b/examples/gateways/github.yaml
index bff7bc302e..a19e4b6145 100644
--- a/examples/gateways/github.yaml
+++ b/examples/gateways/github.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "github"
- eventSource: "github-event-source"
+ eventSourceRef:
+ name: "github-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/gitlab.yaml b/examples/gateways/gitlab.yaml
index 13533d91e5..af06647fc2 100644
--- a/examples/gateways/gitlab.yaml
+++ b/examples/gateways/gitlab.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "gitlab"
- eventSource: "gitlab-event-source"
+ eventSourceRef:
+ name: "gitlab-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/hdfs.yaml b/examples/gateways/hdfs.yaml
index 6a6f82c42b..610c896244 100644
--- a/examples/gateways/hdfs.yaml
+++ b/examples/gateways/hdfs.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "hdfs"
- eventSource: "hdfs-event-source"
+ eventSourceRef:
+ name: "hdfs-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
@@ -23,15 +24,15 @@ spec:
gateway-name: "hdfs-gateway"
spec:
containers:
- - name: "gateway-client"
- image: "argoproj/gateway-client"
- imagePullPolicy: "Always"
- command: ["/bin/gateway-client"]
- - name: "hdfs-events"
- image: "argoproj/hdfs-gateway"
- imagePullPolicy: "Always"
- command: ["/bin/hdfs-gateway"]
+ - name: "gateway-client"
+ image: "argoproj/gateway-client"
+ imagePullPolicy: "Always"
+ command: ["/bin/gateway-client"]
+ - name: "hdfs-events"
+ image: "argoproj/hdfs-gateway"
+ imagePullPolicy: "Always"
+ command: ["/bin/hdfs-gateway"]
serviceAccountName: "argo-events-sa"
watchers:
sensors:
- - name: "hdfs-sensor"
+ - name: "hdfs-sensor"
diff --git a/examples/gateways/kafka.yaml b/examples/gateways/kafka.yaml
index a7b00894de..ba07d22475 100644
--- a/examples/gateways/kafka.yaml
+++ b/examples/gateways/kafka.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "kafka"
- eventSource: "kafka-event-source"
+ eventSourceRef:
+ name: "kafka-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/artifact-nats-standard.yaml b/examples/gateways/minio-nats-standard.yaml
similarity index 59%
rename from examples/gateways/artifact-nats-standard.yaml
rename to examples/gateways/minio-nats-standard.yaml
index df3a60f20b..2956141f8c 100644
--- a/examples/gateways/artifact-nats-standard.yaml
+++ b/examples/gateways/minio-nats-standard.yaml
@@ -1,16 +1,17 @@
apiVersion: argoproj.io/v1alpha1
kind: Gateway
metadata:
- name: artifact-gateway-nats-standard
+ name: minio-gateway-nats-standard
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
- type: "artifact"
- eventSource: "artifact-event-source"
+ replica: 1
+ type: "minio"
+ eventSourceRef:
+ name: "minio-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "NATS"
@@ -19,17 +20,17 @@ spec:
type: "Standard"
template:
metadata:
- name: "artifact-gateway-nats-standard"
+ name: "minio-gateway-nats-standard"
labels:
- gateway-name: "artifact-gateway-nats-standard"
+ gateway-name: "minio-gateway-nats-standard"
spec:
containers:
- name: "gateway-client"
image: "argoproj/gateway-client"
imagePullPolicy: "Always"
command: ["/bin/gateway-client"]
- - name: "artifact-events"
- image: "argoproj/artifact-gateway"
+ - name: "minio-events"
+ image: "argoproj/minio-gateway"
imagePullPolicy: "Always"
- command: ["/bin/artifact-gateway"]
+ command: ["/bin/minio-gateway"]
serviceAccountName: "argo-events-sa"
diff --git a/examples/gateways/artifact-nats-streaming.yaml b/examples/gateways/minio-nats-streaming.yaml
similarity index 61%
rename from examples/gateways/artifact-nats-streaming.yaml
rename to examples/gateways/minio-nats-streaming.yaml
index 86107f4749..5e111319a8 100644
--- a/examples/gateways/artifact-nats-streaming.yaml
+++ b/examples/gateways/minio-nats-streaming.yaml
@@ -1,16 +1,17 @@
apiVersion: argoproj.io/v1alpha1
kind: Gateway
metadata:
- name: artifact-gateway-nats-streaming
+ name: minio-gateway-nats-streaming
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
- type: "artifact"
- eventSource: "artifact-event-source"
+ replica: 1
+ type: "minio"
+ eventSourceRef:
+ name: "minio-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "NATS"
@@ -21,17 +22,17 @@ spec:
type: "Streaming"
template:
metadata:
- name: "artifact-gateway-nats-streaming"
+ name: "minio-gateway-nats-streaming"
labels:
- gateway-name: "artifact-gateway-nats-streaming"
+ gateway-name: "minio-gateway-nats-streaming"
spec:
containers:
- name: "gateway-client"
image: "argoproj/gateway-client"
imagePullPolicy: "Always"
command: ["/bin/gateway-client"]
- - name: "artifact-events"
- image: "argoproj/artifact-gateway"
+ - name: "minio-events"
+ image: "argoproj/minio-gateway"
imagePullPolicy: "Always"
- command: ["/bin/artifact-gateway"]
+ command: ["/bin/minio-gateway"]
serviceAccountName: "argo-events-sa"
diff --git a/examples/gateways/artifact.yaml b/examples/gateways/minio.yaml
similarity index 59%
rename from examples/gateways/artifact.yaml
rename to examples/gateways/minio.yaml
index e1804b3378..b8492768e3 100644
--- a/examples/gateways/artifact.yaml
+++ b/examples/gateways/minio.yaml
@@ -1,14 +1,14 @@
apiVersion: argoproj.io/v1alpha1
kind: Gateway
metadata:
- name: artifact-gateway
+ name: minio-gateway
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
+ # type of the gateway
+ type: "minio"
processorPort: "9330"
eventProtocol:
type: "HTTP"
@@ -16,23 +16,25 @@ spec:
port: "9300"
template:
metadata:
- name: "artifact-gateway"
+ name: "minio-gateway"
labels:
- gateway-name: "artifact-gateway"
+ gateway-name: "minio-gateway"
spec:
containers:
- name: "gateway-client"
image: "argoproj/gateway-client"
imagePullPolicy: "Always"
command: ["/bin/gateway-client"]
- - name: "artifact-events"
- image: "argoproj/artifact-gateway"
+ - name: "minio-events"
+ image: "argoproj/minio-gateway"
imagePullPolicy: "Always"
- command: ["/bin/artifact-gateway"]
+ command: ["/bin/minio-gateway"]
serviceAccountName: "argo-events-sa"
- eventSource: "artifact-event-source"
+ eventSourceRef:
+ name: "minio-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
eventVersion: "1.0"
- type: "artifact"
watchers:
sensors:
- - name: "artifact-sensor"
+ - name: "minio-sensor"
diff --git a/examples/gateways/mqtt.yaml b/examples/gateways/mqtt.yaml
index 7a2606585e..31f36cc03a 100644
--- a/examples/gateways/mqtt.yaml
+++ b/examples/gateways/mqtt.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "mqtt"
- eventSource: "mqtt-event-source"
+ eventSourceRef:
+ name: "mqtt-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/multi-watchers.yaml b/examples/gateways/multi-watchers.yaml
index c9041d5309..fdc5ef4a7f 100644
--- a/examples/gateways/multi-watchers.yaml
+++ b/examples/gateways/multi-watchers.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "webhook"
- eventSource: "webhook-event-source"
+ eventSourceRef:
+ name: "webhook-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
@@ -47,10 +48,10 @@ spec:
# and user must provide port and endpoint on which event should be dispatched.
# Adding gateways as watchers are particularly useful when you want to chain events.
gateways:
- - name: "webhook-gateway"
- port: "9070"
- endpoint: "/notifications"
+ - name: "webhook-gateway"
+ port: "9070"
+ endpoint: "/notifications"
sensors:
- - name: "webhook-sensor"
- - name: "multi-signal-sensor"
- - name: "webhook-time-filter-sensor"
+ - name: "webhook-sensor"
+ - name: "multi-signal-sensor"
+ - name: "webhook-time-filter-sensor"
diff --git a/examples/gateways/nats.yaml b/examples/gateways/nats.yaml
index 7dfff8682e..d740095ea3 100644
--- a/examples/gateways/nats.yaml
+++ b/examples/gateways/nats.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "nats"
- eventSource: "nats-event-source"
+ eventSourceRef:
+ name: "nats-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/resource.yaml b/examples/gateways/resource.yaml
index 2ae45045d4..c6982da838 100644
--- a/examples/gateways/resource.yaml
+++ b/examples/gateways/resource.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "resource"
- eventSource: "resource-event-source"
+ eventSourceRef:
+ name: "resource-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/secure-webhook.yaml b/examples/gateways/secure-webhook.yaml
index 1912efdaba..ba745fac99 100644
--- a/examples/gateways/secure-webhook.yaml
+++ b/examples/gateways/secure-webhook.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "webhook"
- eventSource: "webhook-event-source"
+ eventSourceRef:
+ name: "webhook-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/sensor-in-different-namespace.yaml b/examples/gateways/sensor-in-different-namespace.yaml
index a1ee0f8fe9..5e39b7f4b8 100644
--- a/examples/gateways/sensor-in-different-namespace.yaml
+++ b/examples/gateways/sensor-in-different-namespace.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "webhook"
- eventSource: "webhook-event-source"
+ eventSourceRef:
+ name: "webhook-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/slack.yaml b/examples/gateways/slack.yaml
index 5af65db03e..e7f045bf01 100644
--- a/examples/gateways/slack.yaml
+++ b/examples/gateways/slack.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "slack"
- eventSource: "slack-event-source"
+ eventSourceRef:
+ name: "slack-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
diff --git a/examples/gateways/storage-grid.yaml b/examples/gateways/storage-grid.yaml
index f250c424a4..ac18c5a14d 100644
--- a/examples/gateways/storage-grid.yaml
+++ b/examples/gateways/storage-grid.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "storage-grid"
- eventSource: "storage-grid-event-source"
+ eventSourceRef:
+ name: "storage-grid-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
@@ -43,4 +44,4 @@ spec:
type: LoadBalancer
watchers:
sensors:
- - name: "storage-grid-watcher-sensor"
+ - name: "storage-grid-watcher-sensor"
diff --git a/examples/gateways/webhook-nats-standard.yaml b/examples/gateways/webhook-nats-standard.yaml
index b3d82a184b..d452eaae2a 100644
--- a/examples/gateways/webhook-nats-standard.yaml
+++ b/examples/gateways/webhook-nats-standard.yaml
@@ -5,11 +5,12 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
- eventSource: "webhook-event-source"
+ replica: 1
+ eventSourceRef:
+ name: "webhook-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
type: "webhook"
processorPort: "9330"
eventProtocol:
diff --git a/examples/gateways/webhook-nats-streaming.yaml b/examples/gateways/webhook-nats-streaming.yaml
index e337492a70..26e251134c 100644
--- a/examples/gateways/webhook-nats-streaming.yaml
+++ b/examples/gateways/webhook-nats-streaming.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "webhook"
- eventSource: "webhook-event-source"
+ eventSourceRef:
+ name: "webhook-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "NATS"
diff --git a/examples/gateways/webhook.yaml b/examples/gateways/webhook.yaml
index 3452bfdfee..c7dc133d07 100644
--- a/examples/gateways/webhook.yaml
+++ b/examples/gateways/webhook.yaml
@@ -5,12 +5,13 @@ metadata:
labels:
# gateway controller with instanceId "argo-events" will process this gateway
gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with it's own version
- # do not remove
- argo-events-gateway-version: v0.11
spec:
+ replica: 1
type: "webhook"
- eventSource: "webhook-event-source"
+ eventSourceRef:
+ name: "webhook-event-source"
+ # optional, if event source is deployed in a different namespace than the gateway
+ # namespace: "other-namespace"
processorPort: "9330"
eventProtocol:
type: "HTTP"
@@ -24,22 +25,22 @@ spec:
spec:
containers:
- name: "gateway-client"
- image: "argoproj/gateway-client"
+ image: "argoproj/gateway-client:v0.12-test"
imagePullPolicy: "Always"
command: ["/bin/gateway-client"]
- name: "webhook-events"
- image: "argoproj/webhook-gateway"
+ image: "argoproj/webhook-gateway:v0.12-test"
imagePullPolicy: "Always"
command: ["/bin/webhook-gateway"]
-# To make webhook secure, mount the secret that contains certificate and private key in the container
-# and refer that mountPath in the event source.
-# volumeMounts:
-# - mountPath: "/bin/webhook-secure"
-# name: secure
-# volumes:
-# - name: secure
-# secret:
-# secretName: webhook-secure
+ # To make webhook secure, mount the secret that contains certificate and private key in the container
+ # and refer that mountPath in the event source.
+ # volumeMounts:
+ # - mountPath: "/bin/webhook-secure"
+ # name: secure
+ # volumes:
+ # - name: secure
+ # secret:
+ # secretName: webhook-secure
serviceAccountName: "argo-events-sa"
service:
metadata:
diff --git a/examples/sensors/amqp.yaml b/examples/sensors/amqp.yaml
index 2463c0136e..f6eb7683ba 100644
--- a/examples/sensors/amqp.yaml
+++ b/examples/sensors/amqp.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -29,7 +26,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
@@ -38,18 +35,18 @@ spec:
entrypoint: whalesay
arguments:
parameters:
- - name: message
- # this is the value that should be overridden
- value: hello world
- templates:
- - name: whalesay
- inputs:
- parameters:
- name: message
- container:
- image: docker/whalesay:latest
- command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
+ # this is the value that should be overridden
+ value: hello world
+ templates:
+ - name: whalesay
+ inputs:
+ parameters:
+ - name: message
+ container:
+ image: docker/whalesay:latest
+ command: [cowsay]
+ args: ["{{inputs.parameters.message}}"]
resourceParameters:
- src:
event: "amqp-gateway:example-with-retry"
diff --git a/examples/sensors/artifact-with-param-nats-standard.yaml b/examples/sensors/artifact-with-param-nats-standard.yaml
deleted file mode 100644
index 91d95f0895..0000000000
--- a/examples/sensors/artifact-with-param-nats-standard.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: Sensor
-metadata:
- name: artifact-with-param-nats-standard-sensor
- labels:
- # sensor controller with instanceId "argo-events" will process this sensor
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
-spec:
- template:
- spec:
- containers:
- - name: "sensor"
- image: "argoproj/sensor"
- imagePullPolicy: Always
- serviceAccountName: argo-events-sa
- eventProtocol:
- type: "NATS"
- nats:
- type: "Standard"
- url: "nats://example-nats.argo-events:4222"
- dependencies:
- - name: "artifact-gateway-nats-standard:example-1"
- triggers:
- - template:
- name: argo-workflow
- group: argoproj.io
- version: v1alpha1
- resource: workflows
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: artifact-workflow-
- spec:
- entrypoint: whalesay
- templates:
- - name: whalesay
- container:
- command:
- - cowsay
- image: "docker/whalesay:latest"
- # The container args from the workflow are overridden by the s3 notification key
- resourceParameters:
- - src:
- event: "artifact-gateway-nats-standard:example-1"
- path: s3.object.key
- dest: spec.templates.0.container.args.0
diff --git a/examples/sensors/artifact-with-param-nats-streaming.yaml b/examples/sensors/artifact-with-param-nats-streaming.yaml
deleted file mode 100644
index 58c0066a32..0000000000
--- a/examples/sensors/artifact-with-param-nats-streaming.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: Sensor
-metadata:
- name: artifact-with-param-nats-streaming-sensor
- labels:
- # sensor controller with instanceId "argo-events" will process this sensor
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
-spec:
- template:
- spec:
- containers:
- - name: "sensor"
- image: "argoproj/sensor"
- imagePullPolicy: Always
- serviceAccountName: argo-events-sa
- eventProtocol:
- type: "NATS"
- nats:
- type: "Streaming"
- url: "nats://example-nats.argo-events:4222"
- clusterId: "example-stan"
- clientId: "myclient1"
- dependencies:
- - name: "artifact-gateway-nats-streaming:example-1"
- triggers:
- - template:
- name: argo-workflow
- group: argoproj.io
- version: v1alpha1
- resource: workflows
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: artifact-workflow-
- spec:
- entrypoint: whalesay
- templates:
- - name: whalesay
- container:
- command:
- - cowsay
- image: "docker/whalesay:latest"
- # The container args from the workflow are overridden by the s3 notification key
- resourceParameters:
- - src:
- event: "artifact-gateway-nats-streaming:example-1"
- path: s3.object.key
- dest: spec.templates.0.container.args.0
diff --git a/examples/sensors/aws-sns.yaml b/examples/sensors/aws-sns.yaml
index 66c4829297..24a9453169 100644
--- a/examples/sensors/aws-sns.yaml
+++ b/examples/sensors/aws-sns.yaml
@@ -5,10 +5,8 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
+ version: "v0.11"
template:
spec:
containers:
@@ -29,7 +27,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/aws-sqs.yaml b/examples/sensors/aws-sqs.yaml
index 95a6f5163e..51e8feae81 100644
--- a/examples/sensors/aws-sqs.yaml
+++ b/examples/sensors/aws-sqs.yaml
@@ -5,10 +5,8 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
+ version: "v0.11"
template:
spec:
containers:
@@ -21,7 +19,7 @@ spec:
http:
port: "9300"
dependencies:
- - name: "aws-sqs-gateway:example-1"
+ - name: "aws-sqs-gateway:example"
triggers:
- template:
name: sqs-workflow
@@ -29,7 +27,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
@@ -52,5 +50,5 @@ spec:
args: ["{{inputs.parameters.message}}"]
resourceParameters:
- src:
- event: "aws-sqs-gateway:example-1"
+ event: "aws-sqs-gateway:example"
dest: spec.arguments.parameters.0.value
diff --git a/examples/sensors/calendar.yaml b/examples/sensors/calendar.yaml
index 4d4d8cc54b..8743634b9b 100644
--- a/examples/sensors/calendar.yaml
+++ b/examples/sensors/calendar.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -29,7 +26,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/complete-trigger-parameterization.yaml b/examples/sensors/complete-trigger-parameterization.yaml
index 2c7ad93b55..28c4eaf50e 100644
--- a/examples/sensors/complete-trigger-parameterization.yaml
+++ b/examples/sensors/complete-trigger-parameterization.yaml
@@ -21,9 +21,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
diff --git a/examples/sensors/context-filter-webhook.yaml b/examples/sensors/context-filter-webhook.yaml
index 4fc6d05c8a..78e57f6f47 100644
--- a/examples/sensors/context-filter-webhook.yaml
+++ b/examples/sensors/context-filter-webhook.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
diff --git a/examples/sensors/data-filter-webhook.yaml b/examples/sensors/data-filter-webhook.yaml
index 41f56e58a5..6136382d08 100644
--- a/examples/sensors/data-filter-webhook.yaml
+++ b/examples/sensors/data-filter-webhook.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -24,9 +21,7 @@ spec:
- path: bucket
type: string
value:
- # regular expression
- - "^bucket-.*"
- # normal value
+ - "argo-workflow-input"
- "argo-workflow-input1"
eventProtocol:
type: "HTTP"
@@ -39,7 +34,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/dependencies-circuit-complex.yaml b/examples/sensors/dependencies-circuit-complex.yaml
deleted file mode 100644
index 78b683ef14..0000000000
--- a/examples/sensors/dependencies-circuit-complex.yaml
+++ /dev/null
@@ -1,148 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: Sensor
-metadata:
- name: webhook-sensor-http
- labels:
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
-spec:
- template:
- spec:
- containers:
- - name: "sensor"
- image: "argoproj/sensor"
- imagePullPolicy: Always
- serviceAccountName: argo-events-sa
- dependencies:
- - name: "webhook-gateway-http:endpoint1"
- filters:
- name: "context-filter"
- context:
- source:
- host: xyz.com
- contentType: application/json
- - name: "webhook-gateway-http:endpoint2"
- - name: "webhook-gateway-http:endpoint3"
- - name: "webhook-gateway-http:endpoint4"
- filters:
- name: "data-filter"
- data:
- - path: bucket
- type: string
- value:
- - "argo-workflow-input"
- - "argo-workflow-input1"
- - name: "webhook-gateway-http:endpoint5"
- - name: "webhook-gateway-http:endpoint6"
- - name: "webhook-gateway-http:endpoint7"
- - name: "webhook-gateway-http:endpoint8"
- - name: "webhook-gateway-http:endpoint9"
- dependencyGroups:
- - name: "group_1"
- dependencies:
- - "webhook-gateway-http:endpoint1"
- - "webhook-gateway-http:endpoint2"
- - name: "group_2"
- dependencies:
- - "webhook-gateway-http:endpoint3"
- - name: "group_3"
- dependencies:
- - "webhook-gateway-http:endpoint4"
- - "webhook-gateway-http:endpoint5"
- - name: "group_4"
- dependencies:
- - "webhook-gateway-http:endpoint6"
- - "webhook-gateway-http:endpoint7"
- - "webhook-gateway-http:endpoint8"
- - name: "group_5"
- dependencies:
- - "webhook-gateway-http:endpoint9"
- circuit: "group_1 || group_2 || ((group_3 || group_4) && group_5)"
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- triggers:
- - template:
- when:
- any:
- - "group_1"
- - "group_2"
- name: webhook-workflow-trigger
- group: argoproj.io
- version: v1alpha1
- resource: workflows
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-1-
- spec:
- entrypoint: whalesay
- arguments:
- parameters:
- - name: message
- # this is the value that should be overridden
- value: hello world
- templates:
- - name: whalesay
- inputs:
- parameters:
- - name: message
- container:
- image: docker/whalesay:latest
- command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
- resourceParameters:
- - src:
- event: "webhook-gateway-http:endpoint1"
- dest: spec.arguments.parameters.0.value
- - template:
- name: webhook-workflow-trigger-2
- when:
- all:
- - "group_5"
- - "group_4"
- group: argoproj.io
- version: v1alpha1
- resource: workflows
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-world-2-
- spec:
- entrypoint: whalesay
- templates:
- - name: whalesay
- container:
- args:
- - "hello world"
- command:
- - cowsay
- image: "docker/whalesay:latest"
- - template:
- name: webhook-workflow-trigger-common
- group: argoproj.io
- version: v1alpha1
- resource: workflows
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-world-common-
- spec:
- entrypoint: whalesay
- templates:
- - name: whalesay
- container:
- args:
- - "hello world"
- command:
- - cowsay
- image: "docker/whalesay:latest"
diff --git a/examples/sensors/dependencies-circuit.yaml b/examples/sensors/dependencies-circuit.yaml
index 8f51bc671f..3d96d3aa78 100644
--- a/examples/sensors/dependencies-circuit.yaml
+++ b/examples/sensors/dependencies-circuit.yaml
@@ -4,9 +4,6 @@ metadata:
name: webhook-sensor-http-boolean-op
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -17,17 +14,17 @@ spec:
serviceAccountName: argo-events-sa
# defines list of all events sensor will accept
dependencies:
- - name: "webhook-gateway-http:foo"
- - name: "webhook-gateway-http:index"
+ - name: "webhook-gateway:example"
+ - name: "webhook-gateway:example-secure"
# divides event dependencies into groups
dependencyGroups:
- name: "group_1"
dependencies:
- - "webhook-gateway-http:foo"
+ - "webhook-gateway:example"
- name: "group_2"
dependencies:
- - "webhook-gateway-http:index"
- # either "webhook-gateway-http:foo" or "webhook-gateway-http:index" happens
+ - "webhook-gateway:example-secure"
+      # either "webhook-gateway:example" or "webhook-gateway:example-secure" happens
circuit: "group_1 || group_2"
eventProtocol:
type: "HTTP"
@@ -43,7 +40,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
@@ -67,7 +64,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
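The `circuit` field above combines dependency groups with boolean operators, for example "group_1 || group_2". Below is a minimal sketch of how such an expression could be evaluated against group states using Go's expression parser; it is an illustration, not the sensor controller's implementation, and it assumes group names are valid Go identifiers:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// evalCircuit evaluates a boolean circuit such as "group_1 || group_2"
// against the resolution state of each dependency group.
func evalCircuit(circuit string, resolved map[string]bool) (bool, error) {
	expr, err := parser.ParseExpr(circuit)
	if err != nil {
		return false, err
	}
	return evalExpr(expr, resolved)
}

func evalExpr(expr ast.Expr, resolved map[string]bool) (bool, error) {
	switch e := expr.(type) {
	case *ast.Ident:
		return resolved[e.Name], nil
	case *ast.ParenExpr:
		return evalExpr(e.X, resolved)
	case *ast.BinaryExpr:
		left, err := evalExpr(e.X, resolved)
		if err != nil {
			return false, err
		}
		right, err := evalExpr(e.Y, resolved)
		if err != nil {
			return false, err
		}
		switch e.Op {
		case token.LOR:
			return left || right, nil
		case token.LAND:
			return left && right, nil
		}
	}
	return false, fmt.Errorf("unsupported expression")
}

func main() {
	ok, err := evalCircuit("group_1 || group_2", map[string]bool{"group_1": false, "group_2": true})
	fmt.Println(ok, err) // true <nil>
}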
diff --git a/examples/sensors/file.yaml b/examples/sensors/file.yaml
index 50c9fabe30..bd45fd7833 100644
--- a/examples/sensors/file.yaml
+++ b/examples/sensors/file.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -29,7 +26,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/gcp-pubsub.yaml b/examples/sensors/gcp-pubsub.yaml
index 2c9f87d40a..bcaa4ac6e7 100644
--- a/examples/sensors/gcp-pubsub.yaml
+++ b/examples/sensors/gcp-pubsub.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -21,7 +18,7 @@ spec:
http:
port: "9300"
dependencies:
- - name: "gcp-pubsub-gateway:example-1"
+ - name: "gcp-pubsub-gateway:example"
triggers:
- template:
name: gcp-workflow
@@ -29,7 +26,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
@@ -52,5 +49,5 @@ spec:
args: ["{{inputs.parameters.message}}"]
resourceParameters:
- src:
- event: "gcp-pubsub-gateway:example-1"
+ event: "gcp-pubsub-gateway:example"
dest: spec.arguments.parameters.0.value
diff --git a/examples/sensors/github.yaml b/examples/sensors/github.yaml
index 1d1ee72191..5ae40f3af7 100644
--- a/examples/sensors/github.yaml
+++ b/examples/sensors/github.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -29,7 +26,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/gitlab.yaml b/examples/sensors/gitlab.yaml
index 8cd3ea7ea9..0396bdec25 100644
--- a/examples/sensors/gitlab.yaml
+++ b/examples/sensors/gitlab.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -29,7 +26,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
@@ -49,7 +46,7 @@ spec:
container:
image: docker/whalesay:latest
command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
+ args: ["{{inputs.parameters.message}}"]
resourceParameters:
- src:
event: "gitlab-gateway:example"
diff --git a/examples/sensors/hdfs.yaml b/examples/sensors/hdfs.yaml
index 6fe21c3728..4b0622d44e 100644
--- a/examples/sensors/hdfs.yaml
+++ b/examples/sensors/hdfs.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -17,7 +14,7 @@ spec:
imagePullPolicy: Always
serviceAccountName: argo-events-sa
dependencies:
- - name: "hdfs-gateway:example-1"
+ - name: "hdfs-gateway:example"
eventProtocol:
type: "HTTP"
http:
@@ -29,23 +26,23 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-world-
- spec:
- entrypoint: whalesay
- templates:
- - name: whalesay
- container:
- args:
- - "hello "
- command:
- - cowsay
- image: "docker/whalesay:latest"
+ resource:
+ apiVersion: argoproj.io/v1alpha1
+ kind: Workflow
+ metadata:
+ generateName: hello-world-
+ spec:
+ entrypoint: whalesay
+ templates:
+ - name: whalesay
+ container:
+ args:
+ - "hello "
+ command:
+ - cowsay
+ image: "docker/whalesay:latest"
resourceParameters:
- src:
- event: "hdfs-gateway:example-1"
+ event: "hdfs-gateway:example"
path: name
dest: spec.templates.0.container.args.1
diff --git a/examples/sensors/kafka.yaml b/examples/sensors/kafka.yaml
index 0022f60a23..382e92f697 100644
--- a/examples/sensors/kafka.yaml
+++ b/examples/sensors/kafka.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -29,7 +26,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/artifact.yaml b/examples/sensors/minio.yaml
similarity index 77%
rename from examples/sensors/artifact.yaml
rename to examples/sensors/minio.yaml
index e93e7166d8..ce11008a14 100644
--- a/examples/sensors/artifact.yaml
+++ b/examples/sensors/minio.yaml
@@ -1,13 +1,10 @@
apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
- name: artifact-sensor
+ name: minio-sensor
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -21,15 +18,15 @@ spec:
http:
port: "9300"
dependencies:
- - name: "artifact-gateway:example-1"
+ - name: "minio-gateway:example-with-filter"
triggers:
- template:
- name: artifact-workflow-trigger
+ name: minio-workflow-trigger
group: argoproj.io
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
@@ -43,10 +40,10 @@ spec:
- cowsay
image: "docker/whalesay:latest"
args:
- - THIS_WILL_BE_REPLACED
+ - THIS_WILL_BE_REPLACED
# The container args from the workflow are overridden by the s3 notification key
resourceParameters:
- src:
- event: "artifact-gateway:example-1"
+ event: "minio-gateway:example-with-filter"
path: s3.object.key
dest: spec.templates.0.container.args.0
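The resourceParameters above copy s3.object.key from the event payload into the workflow's first container argument. The following is a rough sketch of the source-side lookup only, walking a dotted path through the decoded payload; the payload below is fabricated, and the real sensor also patches the destination path, including numeric list indices:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// lookupPath walks a dotted path such as "s3.object.key" through a decoded
// JSON payload. Only map keys are handled in this sketch.
func lookupPath(payload []byte, path string) (interface{}, error) {
	var doc map[string]interface{}
	if err := json.Unmarshal(payload, &doc); err != nil {
		return nil, err
	}
	var current interface{} = doc
	for _, key := range strings.Split(path, ".") {
		m, ok := current.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("path %q does not resolve to a map at %q", path, key)
		}
		current, ok = m[key]
		if !ok {
			return nil, fmt.Errorf("key %q not found", key)
		}
	}
	return current, nil
}

func main() {
	// Fabricated minio/s3 notification fragment.
	payload := []byte(`{"s3": {"object": {"key": "hello.txt"}}}`)
	value, err := lookupPath(payload, "s3.object.key")
	fmt.Println(value, err) // hello.txt <nil>
}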
diff --git a/examples/sensors/mqtt-sensor.yaml b/examples/sensors/mqtt-sensor.yaml
index f69d70fbe7..a20cc44172 100644
--- a/examples/sensors/mqtt-sensor.yaml
+++ b/examples/sensors/mqtt-sensor.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -29,7 +26,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/multi-signal-sensor.yaml b/examples/sensors/multi-signal-sensor.yaml
index e07dec477f..7273074461 100644
--- a/examples/sensors/multi-signal-sensor.yaml
+++ b/examples/sensors/multi-signal-sensor.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -20,9 +17,9 @@ spec:
type: "HTTP"
http:
port: "9300"
- # wait for both "webhook-gateway-http:foo" and "calendar-gateway:interval" to happen
+    # wait for both "webhook-gateway:example" and "calendar-gateway:example-with-interval" to happen
dependencies:
- - name: "webhook-gateway-http:example"
+ - name: "webhook-gateway:example"
- name: "calendar-gateway:example-with-interval"
triggers:
- template:
@@ -31,7 +28,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
@@ -41,7 +38,7 @@ spec:
arguments:
parameters:
- name: message1
- # this is the value that should be overridden by event payload from webhook-gateway-http:foo
+ # this is the value that should be overridden by event payload from webhook-gateway:example
value: hello world
- name: message2
# this is the value that should be overridden by event payload from calendar-gateway:interval
diff --git a/examples/sensors/multi-trigger-sensor.yaml b/examples/sensors/multi-trigger-sensor.yaml
index b27fd27034..4c93c6697e 100644
--- a/examples/sensors/multi-trigger-sensor.yaml
+++ b/examples/sensors/multi-trigger-sensor.yaml
@@ -4,9 +4,6 @@ metadata:
name: nats-multi-trigger-sensor
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -46,19 +43,19 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-world-
- spec:
- entrypoint: whalesay
- templates:
- -
- container:
- args:
- - "hello world"
- command:
- - cowsay
- image: "docker/whalesay:latest"
- name: whalesay
+ resource:
+ apiVersion: argoproj.io/v1alpha1
+ kind: Workflow
+ metadata:
+ generateName: hello-world-
+ spec:
+ entrypoint: whalesay
+ templates:
+ -
+ container:
+ args:
+ - "hello world"
+ command:
+ - cowsay
+ image: "docker/whalesay:latest"
+ name: whalesay
diff --git a/examples/sensors/nats.yaml b/examples/sensors/nats.yaml
index 6823258895..c5107b5954 100644
--- a/examples/sensors/nats.yaml
+++ b/examples/sensors/nats.yaml
@@ -6,9 +6,6 @@ metadata:
name: nats-sensor
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -30,7 +27,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/resource.yaml b/examples/sensors/resource.yaml
index 9f2a8d46e4..5e239f1499 100644
--- a/examples/sensors/resource.yaml
+++ b/examples/sensors/resource.yaml
@@ -4,9 +4,6 @@ metadata:
name: resource-sensor
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -28,7 +25,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/slack.yaml b/examples/sensors/slack.yaml
index d17dff1a3c..257b524d97 100644
--- a/examples/sensors/slack.yaml
+++ b/examples/sensors/slack.yaml
@@ -4,9 +4,6 @@ metadata:
name: slack-sensor
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -20,7 +17,7 @@ spec:
http:
port: "9300"
dependencies:
- - name: "slack-gateway:example-1"
+ - name: "slack-gateway:example-insecure"
triggers:
- template:
name: slack-workflow
@@ -28,7 +25,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
@@ -51,5 +48,5 @@ spec:
args: ["{{inputs.parameters.message}}"]
resourceParameters:
- src:
- event: "slack-gateway:example-1"
+ event: "slack-gateway:example-insecure"
dest: spec.arguments.parameters.0.value
diff --git a/examples/sensors/storage-grid.yaml b/examples/sensors/storage-grid.yaml
index 583064de68..5c92f03cf3 100644
--- a/examples/sensors/storage-grid.yaml
+++ b/examples/sensors/storage-grid.yaml
@@ -4,9 +4,6 @@ metadata:
name: storage-grid-watcher-sensor
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -28,7 +25,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+      resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/time-filter-webhook.yaml b/examples/sensors/time-filter-webhook.yaml
index 0549edeeb8..0b1aa63df7 100644
--- a/examples/sensors/time-filter-webhook.yaml
+++ b/examples/sensors/time-filter-webhook.yaml
@@ -4,9 +4,6 @@ metadata:
name: webhook-time-filter-sensor
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -32,19 +29,19 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: time-filter-hello-world-
- spec:
- entrypoint: whalesay
- templates:
- -
- container:
- args:
- - "hello world"
- command:
- - cowsay
- image: "docker/whalesay:latest"
- name: whalesay
+ resource:
+ apiVersion: argoproj.io/v1alpha1
+ kind: Workflow
+ metadata:
+ generateName: time-filter-hello-world-
+ spec:
+ entrypoint: whalesay
+ templates:
+ -
+ container:
+ args:
+ - "hello world"
+ command:
+ - cowsay
+ image: "docker/whalesay:latest"
+ name: whalesay
diff --git a/examples/sensors/trigger-gateway.yaml b/examples/sensors/trigger-gateway.yaml
deleted file mode 100644
index 27385d9297..0000000000
--- a/examples/sensors/trigger-gateway.yaml
+++ /dev/null
@@ -1,96 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: Sensor
-metadata:
- name: trigger-gateway-sensor
- labels:
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
-spec:
- template:
- spec:
- containers:
- - name: "sensor"
- image: "argoproj/sensor"
- imagePullPolicy: Always
- serviceAccountName: argo-events-sa
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- dependencies:
- - name: "webhook-gateway:example"
- # once sensor receives an event from webhook gateway, it will create an artifact gateway.
- triggers:
- - template:
- name: artifact-event-source-trigger
- group: ""
- version: v1
- resource: configmaps
- source:
- inline: |
- apiVersion: v1
- kind: Configmap
- metadata:
- name: artifact-event-source
- labels:
- argo-events-event-source-version: v0.11
- spec:
- data:
- example: |-
- bucket:
- name: input
- endpoint: minio-service.argo-events:9000
- event: s3:ObjectCreated:Put
- filter:
- prefix: ""
- suffix: ""
- insecure: true
- accessKey:
- key: accesskey
- name: artifacts-minio
- secretKey:
- key: secretkey
- name: artifacts-minio
- - template:
- name: artifact-gateway-trigger
- group: argoproj.io
- version: v1alpha1
- resource: gateways
- source:
- inline: |-
- apiVersion: argoproj.io/v1alpha1
- kind: Gateway
- metadata:
- name: artifact-gateway
- labels:
- gateways.argoproj.io/gateway-controller-instanceid: argo-events
- argo-events-gateway-version: v0.11
- spec:
- type: "artifact"
- eventSource: "artifact-event-source"
- processorPort: "9330"
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- template:
- metadata:
- name: "artifact-gateway"
- labels:
- gateway-name: "artifact-gateway"
- spec:
- containers:
- - name: "gateway-client"
- image: "argoproj/gateway-client"
- imagePullPolicy: "Always"
- command: ["/bin/gateway-client"]
- - name: "artifact-events"
- image: "argoproj/artifact-gateway"
- imagePullPolicy: "Always"
- command: ["/bin/artifact-gateway"]
- serviceAccountName: "argo-events-sa"
- watchers:
- sensors:
- - name: "artifact-sensor"
diff --git a/examples/sensors/trigger-resource.yaml b/examples/sensors/trigger-resource.yaml
deleted file mode 100644
index c59990753e..0000000000
--- a/examples/sensors/trigger-resource.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: Sensor
-metadata:
- name: trigger-resource-sensor
- labels:
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
-spec:
- template:
- spec:
- containers:
- - name: "sensor"
- image: "argoproj/sensor"
- imagePullPolicy: Always
- serviceAccountName: argo-events-sa
- dependencies:
- - name: "webhook-gateway-http:foo"
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- triggers:
- - template:
- name: trigger1
- group: argoproj.io
- version: v1alpha1
- resource: workflows
- source:
- # resource is generic template for K8s resource
- # This is similar to `inline` trigger but useful if you are using kustomize and want to parameterize the trigger
- resource:
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: webhook-
- spec:
- entrypoint: whalesay
- arguments:
- parameters:
- - name: message
- # this is the value that should be overridden
- value: hello world
- templates:
- - name: whalesay
- inputs:
- parameters:
- - name: message
- container:
- image: docker/whalesay:latest
- command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
- resourceParameters:
- - src:
- event: "webhook-gateway-http:foo"
- dest: spec.arguments.parameters.0.value
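Across these examples the trigger source moves from an `inline` YAML string to a structured `resource` block. As the comment in the file deleted above notes, the structured form is easier to parameterize (for example with kustomize), because an inline source first has to be unmarshalled before any field can be addressed. A small sketch of that extra step, using the ghodss/yaml package that appears later in this diff; the manifest fragment is illustrative:

package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// With `inline`, the trigger source is an opaque string that must be
	// unmarshalled before any field can be inspected or overridden.
	inline := `
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: hello-world-
`
	var obj map[string]interface{}
	if err := yaml.Unmarshal([]byte(inline), &obj); err != nil {
		panic(err)
	}
	fmt.Println(obj["kind"]) // Workflow

	// With `resource`, the manifest is already structured YAML inside the
	// Sensor spec, so overlays and parameters can address fields directly.
}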
diff --git a/examples/sensors/trigger-source-configmap.yaml b/examples/sensors/trigger-source-configmap.yaml
index 229c49c361..b82b1a3a99 100644
--- a/examples/sensors/trigger-source-configmap.yaml
+++ b/examples/sensors/trigger-source-configmap.yaml
@@ -4,9 +4,6 @@ metadata:
name: trigger-source-configmap-sensor
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -20,10 +17,10 @@ spec:
http:
port: "9300"
dependencies:
- - name: "artifact-gateway:example-1"
+ - name: "minio-gateway:example-1"
triggers:
- template:
- name: artifact-workflow-trigger
+ name: minio-workflow-trigger
group: argoproj.io
version: v1alpha1
resource: workflows
diff --git a/examples/sensors/trigger-source-file.yaml b/examples/sensors/trigger-source-file.yaml
index 95906341e0..3340d3962b 100644
--- a/examples/sensors/trigger-source-file.yaml
+++ b/examples/sensors/trigger-source-file.yaml
@@ -5,9 +5,6 @@ metadata:
labels:
# sensor controller with instanceId "argo-events" will process this sensor
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
diff --git a/examples/sensors/trigger-source-git.yaml b/examples/sensors/trigger-source-git.yaml
index 2c10ad1002..b0d48f374d 100644
--- a/examples/sensors/trigger-source-git.yaml
+++ b/examples/sensors/trigger-source-git.yaml
@@ -4,9 +4,6 @@ metadata:
name: trigger-source-git
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -39,7 +36,7 @@ spec:
secretName: git-known-hosts
serviceAccountName: argo-events-sa
dependencies:
- - name: "webhook-gateway-http:foo"
+ - name: "webhook-gateway:example"
eventProtocol:
type: "HTTP"
http:
diff --git a/examples/sensors/trigger-standard-k8s-resource.yaml b/examples/sensors/trigger-standard-k8s-resource.yaml
index 75fc4d9ddb..8cc35761da 100644
--- a/examples/sensors/trigger-standard-k8s-resource.yaml
+++ b/examples/sensors/trigger-standard-k8s-resource.yaml
@@ -4,9 +4,6 @@ metadata:
name: webhook-sensor-http
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -16,7 +13,7 @@ spec:
imagePullPolicy: Always
serviceAccountName: argo-events-sa
dependencies:
- - name: "webhook-gateway-http:foo"
+ - name: "webhook-gateway:example"
eventProtocol:
type: "HTTP"
http:
@@ -29,7 +26,7 @@ spec:
version: v1
resource: pods
source:
- inline: |
+ resource:
apiVersion: v1
kind: Pod
metadata:
@@ -48,7 +45,7 @@ spec:
version: v1
resource: deployments
source:
- inline: |
+ resource:
apiVersion: apps/v1
kind: Deployment
metadata:
diff --git a/examples/sensors/trigger-with-backoff.yaml b/examples/sensors/trigger-with-backoff.yaml
index 014140cde9..b50ce7dc1f 100644
--- a/examples/sensors/trigger-with-backoff.yaml
+++ b/examples/sensors/trigger-with-backoff.yaml
@@ -4,9 +4,6 @@ metadata:
name: trigger-backoff
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -16,7 +13,7 @@ spec:
imagePullPolicy: Always
serviceAccountName: argo-events-sa
dependencies:
- - name: "webhook-gateway-http:foo"
+ - name: "webhook-gateway:example"
eventProtocol:
type: "HTTP"
http:
@@ -58,7 +55,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
@@ -81,7 +78,7 @@ spec:
args: ["{{inputs.parameters.message}}"]
resourceParameters:
- src:
- event: "webhook-gateway-http:foo"
+ event: "webhook-gateway:example"
dest: spec.arguments.parameters.0.value
- template:
name: trigger-2
@@ -97,9 +94,9 @@ spec:
errorOnBackoffTimeout: false
group: argoproj.io
version: v1alpha1
- resource: workflows
+ kind: Workflow
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/url-sensor.yaml b/examples/sensors/url-sensor.yaml
index 550d7b6698..96be1455a2 100644
--- a/examples/sensors/url-sensor.yaml
+++ b/examples/sensors/url-sensor.yaml
@@ -4,9 +4,6 @@ metadata:
name: url-sensor
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -20,7 +17,7 @@ spec:
http:
port: "9300"
dependencies:
- - name: "artifact-gateway:input"
+ - name: "minio-gateway:example-with-filter"
triggers:
- template:
name: url-workflow-trigger
diff --git a/examples/sensors/webhook-nats-streaming.yaml b/examples/sensors/webhook-nats-streaming.yaml
index 4aa7d0ca83..44749a3410 100644
--- a/examples/sensors/webhook-nats-streaming.yaml
+++ b/examples/sensors/webhook-nats-streaming.yaml
@@ -4,9 +4,6 @@ metadata:
name: webhook-nats-streaming
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -32,7 +29,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/webhook-nats.yaml b/examples/sensors/webhook-nats.yaml
index 13633b444c..3d1d940331 100644
--- a/examples/sensors/webhook-nats.yaml
+++ b/examples/sensors/webhook-nats.yaml
@@ -4,9 +4,6 @@ metadata:
name: webhook-sensor
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -29,7 +26,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/examples/sensors/webhook.yaml b/examples/sensors/webhook.yaml
index 8bff451423..0f4329e22b 100644
--- a/examples/sensors/webhook.yaml
+++ b/examples/sensors/webhook.yaml
@@ -4,9 +4,6 @@ metadata:
name: webhook-sensor
labels:
sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with it's own version
- # do not remove
- argo-events-sensor-version: v0.11
spec:
template:
spec:
@@ -28,7 +25,7 @@ spec:
version: v1alpha1
resource: workflows
source:
- inline: |
+ resource:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/gateways/Dockerfile b/gateways/client/Dockerfile
similarity index 100%
rename from gateways/Dockerfile
rename to gateways/client/Dockerfile
diff --git a/gateways/cmd/main.go b/gateways/client/client.go
similarity index 78%
rename from gateways/cmd/main.go
rename to gateways/client/client.go
index 4ea1bf8e8a..b86599b1f6 100644
--- a/gateways/cmd/main.go
+++ b/gateways/client/client.go
@@ -24,13 +24,12 @@ import (
"time"
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
"k8s.io/apimachinery/pkg/util/wait"
)
func main() {
- // initialize gateway configuration
- gc := gateways.NewGatewayConfiguration()
+ // initialize gateway context
+ ctx := NewGatewayContext()
serverPort, ok := os.LookupEnv(common.EnvVarGatewayServerPort)
if !ok {
@@ -53,19 +52,19 @@ func main() {
panic(fmt.Errorf("failed to connect to server on port %s", serverPort))
}
- // handle event source's status updates
+ // handle gateway status updates
go func() {
- for status := range gc.StatusCh {
- gc.UpdateGatewayResourceState(&status)
+ for status := range ctx.statusCh {
+ ctx.UpdateGatewayState(&status)
}
}()
// watch updates to gateway resource
- if _, err := gc.WatchGateway(context.Background()); err != nil {
+ if _, err := ctx.WatchGatewayUpdates(context.Background()); err != nil {
panic(err)
}
// watch for event source updates
- if _, err := gc.WatchGatewayEventSources(context.Background()); err != nil {
+ if _, err := ctx.WatchGatewayEventSources(context.Background()); err != nil {
panic(err)
}
select {}
diff --git a/gateways/client/context.go b/gateways/client/context.go
new file mode 100644
index 0000000000..91417d18a5
--- /dev/null
+++ b/gateways/client/context.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ pc "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ eventsourceClientset "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned"
+ gwclientset "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned"
+ "github.com/nats-io/go-nats"
+ snats "github.com/nats-io/go-nats-streaming"
+ "github.com/sirupsen/logrus"
+ "google.golang.org/grpc"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+)
+
+// GatewayContext holds the context for a gateway
+type GatewayContext struct {
+	// logger is the logger used by the gateway client
+ logger *logrus.Logger
+ // k8sClient is client for kubernetes API
+ k8sClient kubernetes.Interface
+	// eventSourceRef refers to the event source for the gateway
+	// eventSourceClient is the client for the EventSource resource
+ // eventSourceClient is the client for EventSourceRef resource
+ eventSourceClient eventsourceClientset.Interface
+ // name of the gateway
+ name string
+ // namespace where gateway is deployed
+ namespace string
+ // gateway refers to Gateway custom resource
+ gateway *v1alpha1.Gateway
+ // gatewayClient is gateway clientset
+ gatewayClient gwclientset.Interface
+ // updated indicates whether gateway resource is updated
+	// serverPort is the gateway server port to listen for events from
+ // serverPort is gateway server port to listen events from
+ serverPort string
+ // eventSourceContexts stores information about current event sources that are running in the gateway
+ eventSourceContexts map[string]*EventSourceContext
+	// controllerInstanceID is the instance ID of the gateway controller
+ controllerInstanceID string
+ // statusCh is used to communicate the status of an event source
+ statusCh chan EventSourceStatus
+ // natsConn is the standard nats connection used to publish events to cluster. Only used if dispatch protocol is NATS
+ natsConn *nats.Conn
+ // natsStreamingConn is the nats connection used for streaming.
+ natsStreamingConn snats.Conn
+	// sensorHttpPort is the port of the HTTP server running in the sensor that listens for events. Only used if dispatch protocol is HTTP
+ sensorHttpPort string
+}
+
+// EventSourceContext contains information about an event source for the gateway to run.
+type EventSourceContext struct {
+ // source holds the actual event source
+ source *gateways.EventSource
+ // ctx contains context for the connection
+ ctx context.Context
+ // cancel upon invocation cancels the connection context
+ cancel context.CancelFunc
+ // client is grpc client
+ client gateways.EventingClient
+ // conn is grpc connection
+ conn *grpc.ClientConn
+}
+
+// NewGatewayContext returns a new gateway context
+func NewGatewayContext() *GatewayContext {
+ kubeConfig, _ := os.LookupEnv(common.EnvVarKubeConfig)
+ restConfig, err := common.GetClientConfig(kubeConfig)
+ if err != nil {
+ panic(err)
+ }
+ name, ok := os.LookupEnv(common.EnvVarResourceName)
+ if !ok {
+ panic("gateway name not provided")
+ }
+ namespace, ok := os.LookupEnv(common.EnvVarNamespace)
+ if !ok {
+ panic("no namespace provided")
+ }
+ controllerInstanceID, ok := os.LookupEnv(common.EnvVarControllerInstanceID)
+ if !ok {
+ panic("gateway controller instance ID is not provided")
+ }
+ serverPort, ok := os.LookupEnv(common.EnvVarGatewayServerPort)
+ if !ok {
+ panic("server port is not provided")
+ }
+
+ clientset := kubernetes.NewForConfigOrDie(restConfig)
+ gatewayClient := gwclientset.NewForConfigOrDie(restConfig)
+ eventSourceClient := eventsourceClientset.NewForConfigOrDie(restConfig)
+
+ gateway, err := gatewayClient.ArgoprojV1alpha1().Gateways(namespace).Get(name, metav1.GetOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ gatewayConfig := &GatewayContext{
+ logger: common.NewArgoEventsLogger().WithFields(
+ map[string]interface{}{
+ common.LabelResourceName: gateway.Name,
+ common.LabelNamespace: gateway.Namespace,
+ }).Logger,
+ k8sClient: clientset,
+ namespace: namespace,
+ name: name,
+ eventSourceContexts: make(map[string]*EventSourceContext),
+ eventSourceRef: gateway.Spec.EventSourceRef,
+ eventSourceClient: eventSourceClient,
+ gatewayClient: gatewayClient,
+ gateway: gateway,
+ controllerInstanceID: controllerInstanceID,
+ serverPort: serverPort,
+ statusCh: make(chan EventSourceStatus),
+ }
+
+ switch gateway.Spec.EventProtocol.Type {
+ case pc.HTTP:
+ gatewayConfig.sensorHttpPort = gateway.Spec.EventProtocol.Http.Port
+ case pc.NATS:
+ if gatewayConfig.natsConn, err = nats.Connect(gateway.Spec.EventProtocol.Nats.URL); err != nil {
+ panic(fmt.Errorf("failed to obtain NATS standard connection. err: %+v", err))
+ }
+ gatewayConfig.logger.WithField(common.LabelURL, gateway.Spec.EventProtocol.Nats.URL).Infoln("connected to nats service")
+
+ if gatewayConfig.gateway.Spec.EventProtocol.Nats.Type == pc.Streaming {
+ gatewayConfig.natsStreamingConn, err = snats.Connect(gatewayConfig.gateway.Spec.EventProtocol.Nats.ClusterId, gatewayConfig.gateway.Spec.EventProtocol.Nats.ClientId, snats.NatsConn(gatewayConfig.natsConn))
+ if err != nil {
+ panic(fmt.Errorf("failed to obtain NATS streaming connection. err: %+v", err))
+ }
+ gatewayConfig.logger.WithField(common.LabelURL, gateway.Spec.EventProtocol.Nats.URL).Infoln("nats streaming connection successful")
+ }
+ }
+ return gatewayConfig
+}
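NewGatewayContext above selects the dispatch protocol from the gateway spec: it records the sensor HTTP port for HTTP, or opens standard and streaming NATS connections for NATS. The sketch below publishes a payload over such a streaming connection with the same client libraries; the URL, cluster ID, client ID, and subject are placeholders, a NATS streaming server must be reachable for it to run, and it is not the gateway's own dispatch code:

package main

import (
	"log"

	"github.com/nats-io/go-nats"
	snats "github.com/nats-io/go-nats-streaming"
)

func main() {
	// Standard NATS connection, as used by the gateway for plain NATS dispatch.
	nc, err := nats.Connect("nats://localhost:4222")
	if err != nil {
		log.Fatalf("failed to connect to nats: %v", err)
	}
	defer nc.Close()

	// NATS streaming connection layered on top, mirroring the Streaming branch
	// in NewGatewayContext. The cluster ID, client ID and subject are placeholders.
	sc, err := snats.Connect("test-cluster", "example-client", snats.NatsConn(nc))
	if err != nil {
		log.Fatalf("failed to obtain streaming connection: %v", err)
	}
	defer sc.Close()

	if err := sc.Publish("example-subject", []byte("test payload")); err != nil {
		log.Fatalf("failed to publish: %v", err)
	}
}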
diff --git a/gateways/client/event-source_test.go b/gateways/client/event-source_test.go
new file mode 100644
index 0000000000..a825c13ac3
--- /dev/null
+++ b/gateways/client/event-source_test.go
@@ -0,0 +1,240 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ esv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ gwfake "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned/fake"
+ "github.com/stretchr/testify/assert"
+ "google.golang.org/grpc"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes/fake"
+)
+
+func getGatewayContext() *GatewayContext {
+ return &GatewayContext{
+ logger: common.NewArgoEventsLogger(),
+ serverPort: "20000",
+ statusCh: make(chan EventSourceStatus),
+ gateway: &v1alpha1.Gateway{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fake-gateway",
+ Namespace: "fake-namespace",
+ },
+ Spec: v1alpha1.GatewaySpec{
+ Watchers: &v1alpha1.NotificationWatchers{
+ Sensors: []v1alpha1.SensorNotificationWatcher{},
+ },
+ EventProtocol: &apicommon.EventProtocol{
+ Type: apicommon.HTTP,
+ Http: apicommon.Http{
+ Port: "9000",
+ },
+ },
+ Type: apicommon.WebhookEvent,
+ },
+ },
+ eventSourceContexts: make(map[string]*EventSourceContext),
+ k8sClient: fake.NewSimpleClientset(),
+ gatewayClient: gwfake.NewSimpleClientset(),
+ }
+}
+
+func getEventSource() *esv1alpha1.EventSource {
+ return &esv1alpha1.EventSource{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fake-event-source",
+ Namespace: "fake-namespace",
+ },
+ Spec: &esv1alpha1.EventSourceSpec{
+ Webhook: map[string]webhook.Context{
+ "first-webhook": {
+ Endpoint: "/first-webhook",
+ Method: http.MethodPost,
+ Port: "13000",
+ },
+ },
+ Type: apicommon.WebhookEvent,
+ },
+ }
+}
+
+// Set up a fake gateway server
+type testEventListener struct{}
+
+func (listener *testEventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ defer func() {
+ if r := recover(); r != nil {
+ fmt.Println(r)
+ }
+ }()
+ _ = eventStream.Send(&gateways.Event{
+ Name: eventSource.Name,
+ Payload: []byte("test payload"),
+ })
+
+ <-eventStream.Context().Done()
+
+ return nil
+}
+
+func (listener *testEventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func getGatewayServer() *grpc.Server {
+ srv := grpc.NewServer()
+ gateways.RegisterEventingServer(srv, &testEventListener{})
+ return srv
+}
+
+func TestInitEventSourceContexts(t *testing.T) {
+ gatewayContext := getGatewayContext()
+ eventSource := getEventSource().DeepCopy()
+
+ lis, err := net.Listen("tcp", fmt.Sprintf(":%s", gatewayContext.serverPort))
+ if err != nil {
+ panic(err)
+ }
+
+ server := getGatewayServer()
+ stopCh := make(chan struct{})
+
+ go func() {
+ if err := server.Serve(lis); err != nil {
+ return
+ }
+ }()
+
+ go func() {
+ <-stopCh
+ server.GracefulStop()
+ fmt.Println("server is stopped")
+ }()
+
+ contexts := gatewayContext.initEventSourceContexts(eventSource)
+ assert.NotNil(t, contexts)
+ for _, esContext := range contexts {
+ assert.Equal(t, "first-webhook", esContext.source.Name)
+ assert.NotNil(t, esContext.conn)
+ }
+
+ stopCh <- struct{}{}
+
+ time.Sleep(5 * time.Second)
+}
+
+func TestSyncEventSources(t *testing.T) {
+ gatewayContext := getGatewayContext()
+ eventSource := getEventSource().DeepCopy()
+
+ lis, err := net.Listen("tcp", fmt.Sprintf(":%s", gatewayContext.serverPort))
+ if err != nil {
+ panic(err)
+ }
+
+ server := getGatewayServer()
+ stopCh := make(chan struct{})
+ stopStatus := make(chan struct{})
+
+ go func() {
+ if err := server.Serve(lis); err != nil {
+ fmt.Println(err)
+ return
+ }
+ }()
+
+ go func() {
+ for {
+ select {
+ case status := <-gatewayContext.statusCh:
+ fmt.Println(status.Message)
+ case <-stopStatus:
+ fmt.Println("returning from status")
+ return
+ }
+ }
+ }()
+
+ go func() {
+ <-stopCh
+ server.GracefulStop()
+ fmt.Println("server is stopped")
+ stopStatus <- struct{}{}
+ }()
+
+ err = gatewayContext.syncEventSources(eventSource)
+ assert.Nil(t, err)
+
+ time.Sleep(5 * time.Second)
+
+ delete(eventSource.Spec.Webhook, "first-webhook")
+
+ eventSource.Spec.Webhook["second-webhook"] = webhook.Context{
+ Endpoint: "/second-webhook",
+ Method: http.MethodPost,
+ Port: "13000",
+ }
+
+ err = gatewayContext.syncEventSources(eventSource)
+ assert.Nil(t, err)
+
+ time.Sleep(5 * time.Second)
+
+ delete(eventSource.Spec.Webhook, "second-webhook")
+
+ err = gatewayContext.syncEventSources(eventSource)
+ assert.Nil(t, err)
+
+ time.Sleep(5 * time.Second)
+
+ stopCh <- struct{}{}
+
+ time.Sleep(5 * time.Second)
+}
+
+func TestDiffEventSources(t *testing.T) {
+ gatewayContext := getGatewayContext()
+ eventSourceContexts := map[string]*EventSourceContext{
+ "first-webhook": {},
+ }
+ assert.NotNil(t, eventSourceContexts)
+ staleEventSources, newEventSources := gatewayContext.diffEventSources(eventSourceContexts)
+ assert.Nil(t, staleEventSources)
+ assert.NotNil(t, newEventSources)
+ gatewayContext.eventSourceContexts = map[string]*EventSourceContext{
+ "first-webhook": {},
+ }
+ delete(eventSourceContexts, "first-webhook")
+ staleEventSources, newEventSources = gatewayContext.diffEventSources(eventSourceContexts)
+ assert.NotNil(t, staleEventSources)
+ assert.Nil(t, newEventSources)
+}
diff --git a/gateways/client/event-sources.go b/gateways/client/event-sources.go
new file mode 100644
index 0000000000..0b0126a109
--- /dev/null
+++ b/gateways/client/event-sources.go
@@ -0,0 +1,356 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ eventSourceV1Alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/argoproj/argo-events/pkg/apis/gateway"
+ "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/connectivity"
+ "io"
+)
+
+// populateEventSourceContexts sets up the contexts for event sources
+func (gatewayContext *GatewayContext) populateEventSourceContexts(name string, value interface{}, eventSourceContexts map[string]*EventSourceContext) {
+ body, err := yaml.Marshal(value)
+ if err != nil {
+ gatewayContext.logger.WithField("event-source-name", name).Errorln("failed to marshal the event source value, won't process it")
+ return
+ }
+
+ hashKey := common.Hasher(name + string(body))
+
+ logger := gatewayContext.logger.WithFields(logrus.Fields{
+ "name": name,
+ "value": value,
+ })
+
+ logger.WithField("hash", hashKey).Debugln("hash of the event source")
+
+ // create a connection to gateway server
+ connCtx, cancel := context.WithCancel(context.Background())
+ conn, err := grpc.Dial(
+ fmt.Sprintf("localhost:%s", gatewayContext.serverPort),
+ grpc.WithBlock(),
+ grpc.WithInsecure())
+ if err != nil {
+ logger.WithError(err).Errorln("failed to connect to gateway server")
+ cancel()
+ return
+ }
+
+ logger.WithField("state", conn.GetState().String()).Info("state of the connection to gateway server")
+
+ eventSourceContexts[hashKey] = &EventSourceContext{
+ source: &gateways.EventSource{
+ Id: hashKey,
+ Name: name,
+ Value: body,
+ Type: string(gatewayContext.gateway.Spec.Type),
+ },
+ cancel: cancel,
+ ctx: connCtx,
+ client: gateways.NewEventingClient(conn),
+ conn: conn,
+ }
+}
+
+// diffEventSources diffs the currently active event sources against the updated event sources.
+// It simply compares the event source strings, so if an event source string differs only in how it is written,
+// even though the event sources are actually the same, this method will treat them as different event sources.
+// stale event sources - event sources to be deactivated
+// new event sources - new event sources to activate
+func (gatewayContext *GatewayContext) diffEventSources(eventSourceContexts map[string]*EventSourceContext) (staleEventSources []string, newEventSources []string) {
+ var currentEventSources []string
+ var updatedEventSources []string
+
+ for currentEventSource := range gatewayContext.eventSourceContexts {
+ currentEventSources = append(currentEventSources, currentEventSource)
+ }
+ for updatedEventSource := range eventSourceContexts {
+ updatedEventSources = append(updatedEventSources, updatedEventSource)
+ }
+
+ gatewayContext.logger.WithField("current-event-sources-keys", currentEventSources).Debugln("event sources hashes")
+ gatewayContext.logger.WithField("updated-event-sources-keys", updatedEventSources).Debugln("event sources hashes")
+
+ swapped := false
+	// iterates over the current event sources and the updated event sources
+	// and creates two arrays: the first containing event sources that need to be removed,
+	// the second containing new event sources that need to be added and run.
+ for i := 0; i < 2; i++ {
+ for _, currentEventSource := range currentEventSources {
+ found := false
+ for _, updatedEventSource := range updatedEventSources {
+ if currentEventSource == updatedEventSource {
+ found = true
+ break
+ }
+ }
+ if !found {
+ if swapped {
+ newEventSources = append(newEventSources, currentEventSource)
+ } else {
+ staleEventSources = append(staleEventSources, currentEventSource)
+ }
+ }
+ }
+ if i == 0 {
+ currentEventSources, updatedEventSources = updatedEventSources, currentEventSources
+ swapped = true
+ }
+ }
+ return
+}
+
+// activateEventSources activates the new event sources
+func (gatewayContext *GatewayContext) activateEventSources(eventSources map[string]*EventSourceContext, keys []string) {
+ for _, key := range keys {
+ eventSource := eventSources[key]
+ // register the event source
+ gatewayContext.eventSourceContexts[key] = eventSource
+
+ logger := gatewayContext.logger.WithField(common.LabelEventSource, eventSource.source.Name)
+
+ logger.Infoln("activating new event source...")
+
+ go func() {
+ // conn should be in READY state
+ if eventSource.conn.GetState() != connectivity.Ready {
+ logger.Errorln("connection is not in ready state.")
+ gatewayContext.statusCh <- EventSourceStatus{
+ Phase: v1alpha1.NodePhaseError,
+ Id: eventSource.source.Id,
+ Message: "connection_is_not_in_ready_state",
+ Name: eventSource.source.Name,
+ }
+ return
+ }
+
+ // validate event source
+ if valid, _ := eventSource.client.ValidateEventSource(eventSource.ctx, eventSource.source); !valid.IsValid {
+ logger.WithFields(
+ map[string]interface{}{
+ "validation-failure": valid.Reason,
+ },
+ ).Errorln("event source is not valid")
+ if err := eventSource.conn.Close(); err != nil {
+ logger.WithError(err).Errorln("failed to close client connection")
+ }
+ gatewayContext.statusCh <- EventSourceStatus{
+ Phase: v1alpha1.NodePhaseError,
+ Id: eventSource.source.Id,
+ Message: "event_source_is_not_valid",
+ Name: eventSource.source.Name,
+ }
+ return
+ }
+
+ logger.Infoln("event source is valid")
+
+ // mark event source as running
+ gatewayContext.statusCh <- EventSourceStatus{
+ Phase: v1alpha1.NodePhaseRunning,
+ Message: "event_source_is_running",
+ Id: eventSource.source.Id,
+ Name: eventSource.source.Name,
+ }
+
+ // listen to events from gateway server
+ eventStream, err := eventSource.client.StartEventSource(eventSource.ctx, eventSource.source)
+ if err != nil {
+ logger.WithError(err).Errorln("error occurred while starting event source")
+ gatewayContext.statusCh <- EventSourceStatus{
+ Phase: v1alpha1.NodePhaseError,
+ Message: "failed_to_receive_event_stream",
+ Name: eventSource.source.Name,
+ Id: eventSource.source.Id,
+ }
+ return
+ }
+
+ logger.Infoln("listening to events from gateway server...")
+ for {
+ event, err := eventStream.Recv()
+ if err != nil {
+ if err == io.EOF {
+ logger.Infoln("event source has stopped")
+ gatewayContext.statusCh <- EventSourceStatus{
+ Phase: v1alpha1.NodePhaseCompleted,
+ Message: "event_source_has_been_stopped",
+ Name: eventSource.source.Name,
+ Id: eventSource.source.Id,
+ }
+ return
+ }
+
+ logger.WithError(err).Errorln("failed to receive event from stream")
+ gatewayContext.statusCh <- EventSourceStatus{
+ Phase: v1alpha1.NodePhaseError,
+ Message: "failed_to_receive_event_from_event_source_stream",
+ Name: eventSource.source.Name,
+ Id: eventSource.source.Id,
+ }
+ return
+ }
+ err = gatewayContext.DispatchEvent(event)
+ if err != nil {
+ // escalate error through a K8s event
+ labels := map[string]string{
+ common.LabelEventType: string(common.EscalationEventType),
+ common.LabelEventSourceName: eventSource.source.Name,
+ common.LabelResourceName: gatewayContext.name,
+ common.LabelEventSourceID: eventSource.source.Id,
+ common.LabelOperation: "dispatch_event_to_watchers",
+ }
+				if err := common.GenerateK8sEvent(gatewayContext.k8sClient, "failed to dispatch event to watchers", common.EscalationEventType, "event dispatch failed", gatewayContext.name, gatewayContext.namespace, gatewayContext.controllerInstanceID, gateway.Kind, labels); err != nil {
+ logger.WithError(err).Errorln("failed to create K8s event to escalate event dispatch failure")
+ }
+ logger.WithError(err).Errorln("failed to dispatch event to watchers")
+ }
+ }
+ }()
+ }
+}
+
+// deactivateEventSources deactivates the existing event sources
+func (gatewayContext *GatewayContext) deactivateEventSources(eventSourceNames []string) {
+ for _, eventSourceName := range eventSourceNames {
+ eventSource := gatewayContext.eventSourceContexts[eventSourceName]
+ if eventSource == nil {
+ continue
+ }
+
+ logger := gatewayContext.logger.WithField(common.LabelEventSource, eventSourceName)
+
+ logger.WithField(common.LabelEventSource, eventSource.source.Name).Infoln("stopping the event source")
+ delete(gatewayContext.eventSourceContexts, eventSourceName)
+ gatewayContext.statusCh <- EventSourceStatus{
+ Phase: v1alpha1.NodePhaseRemove,
+ Id: eventSource.source.Id,
+ Message: "event_source_is_removed",
+ Name: eventSource.source.Name,
+ }
+ eventSource.cancel()
+ if err := eventSource.conn.Close(); err != nil {
+ logger.WithField(common.LabelEventSource, eventSource.source.Name).WithError(err).Errorln("failed to close client connection")
+ }
+ }
+}
+
+// syncEventSources syncs the currently active event sources with the updated ones
+func (gatewayContext *GatewayContext) syncEventSources(eventSource *eventSourceV1Alpha1.EventSource) error {
+ eventSourceContexts := gatewayContext.initEventSourceContexts(eventSource)
+
+ staleEventSources, newEventSources := gatewayContext.diffEventSources(eventSourceContexts)
+ gatewayContext.logger.WithField(common.LabelEventSource, staleEventSources).Infoln("deleted event sources")
+ gatewayContext.logger.WithField(common.LabelEventSource, newEventSources).Infoln("new event sources")
+
+ // stop existing event sources
+ gatewayContext.deactivateEventSources(staleEventSources)
+
+ // start new event sources
+ gatewayContext.activateEventSources(eventSourceContexts, newEventSources)
+
+ return nil
+}
+
+// initEventSourceContexts creates an internal representation of the event sources.
+func (gatewayContext *GatewayContext) initEventSourceContexts(eventSource *eventSourceV1Alpha1.EventSource) map[string]*EventSourceContext {
+ eventSourceContexts := make(map[string]*EventSourceContext)
+
+ switch gatewayContext.gateway.Spec.Type {
+ case apicommon.SNSEvent:
+ for key, value := range eventSource.Spec.SNS {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.SQSEvent:
+ for key, value := range eventSource.Spec.SQS {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.PubSubEvent:
+ for key, value := range eventSource.Spec.PubSub {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.NATSEvent:
+ for key, value := range eventSource.Spec.NATS {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.FileEvent:
+ for key, value := range eventSource.Spec.File {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.CalendarEvent:
+ for key, value := range eventSource.Spec.Calendar {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.AMQPEvent:
+ for key, value := range eventSource.Spec.AMQP {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.GitHubEvent:
+ for key, value := range eventSource.Spec.Github {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.GitLabEvent:
+ for key, value := range eventSource.Spec.Gitlab {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.HDFSEvent:
+ for key, value := range eventSource.Spec.HDFS {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.KafkaEvent:
+ for key, value := range eventSource.Spec.Kafka {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.MinioEvent:
+ for key, value := range eventSource.Spec.Minio {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.MQTTEvent:
+ for key, value := range eventSource.Spec.MQTT {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.ResourceEvent:
+ for key, value := range eventSource.Spec.Resource {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.SlackEvent:
+ for key, value := range eventSource.Spec.Slack {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.StorageGridEvent:
+ for key, value := range eventSource.Spec.StorageGrid {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ case apicommon.WebhookEvent:
+ for key, value := range eventSource.Spec.Webhook {
+ gatewayContext.populateEventSourceContexts(key, value, eventSourceContexts)
+ }
+ }
+
+ return eventSourceContexts
+}
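diffEventSources above computes the stale and new key sets with a two-pass loop that swaps its operands halfway through. The following equivalent formulation with plain map lookups is included only as a reading aid; it uses map[string]bool in place of map[string]*EventSourceContext and is not the code the gateway client runs:

package main

import "fmt"

// diffKeys returns the keys present only in current (stale) and the keys
// present only in updated (new). It is equivalent to the swap-based loop
// in diffEventSources, written as two map-lookup passes.
func diffKeys(current, updated map[string]bool) (stale, fresh []string) {
	for key := range current {
		if !updated[key] {
			stale = append(stale, key)
		}
	}
	for key := range updated {
		if !current[key] {
			fresh = append(fresh, key)
		}
	}
	return stale, fresh
}

func main() {
	current := map[string]bool{"first-webhook": true}
	updated := map[string]bool{"second-webhook": true}
	stale, fresh := diffKeys(current, updated)
	fmt.Println(stale, fresh) // [first-webhook] [second-webhook]
}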
diff --git a/gateways/state.go b/gateways/client/state.go
similarity index 50%
rename from gateways/state.go
rename to gateways/client/state.go
index c292f89cb5..007bf8eb93 100644
--- a/gateways/state.go
+++ b/gateways/client/state.go
@@ -14,15 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package gateways
+package main
import (
- gtw "github.com/argoproj/argo-events/controllers/gateway"
"time"
- "github.com/argoproj/argo-events/pkg/apis/gateway"
-
"github.com/argoproj/argo-events/common"
+ gtw "github.com/argoproj/argo-events/controllers/gateway"
+ "github.com/argoproj/argo-events/pkg/apis/gateway"
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -38,38 +37,38 @@ type EventSourceStatus struct {
// Phase of the event source
Phase v1alpha1.NodePhase
// Gateway reference
- Gw *v1alpha1.Gateway
+ Gateway *v1alpha1.Gateway
}
// markGatewayNodePhase marks the node with a phase, returns the node
-func (gc *GatewayConfig) markGatewayNodePhase(nodeStatus *EventSourceStatus) *v1alpha1.NodeStatus {
- log := gc.Log.WithFields(
+func (gatewayContext *GatewayContext) markGatewayNodePhase(nodeStatus *EventSourceStatus) *v1alpha1.NodeStatus {
+ logger := gatewayContext.logger.WithFields(
map[string]interface{}{
common.LabelNodeName: nodeStatus.Name,
common.LabelPhase: string(nodeStatus.Phase),
},
)
- log.Info("marking node phase")
+ logger.Infoln("marking node phase")
- node := gc.getNodeByID(nodeStatus.Id)
+ node := gatewayContext.getNodeByID(nodeStatus.Id)
if node == nil {
- log.Warn("node is not initialized")
+ logger.Warnln("node is not initialized")
return nil
}
if node.Phase != nodeStatus.Phase {
- log.WithField("new-phase", string(nodeStatus.Phase)).Info("phase updated")
+ logger.WithField("new-phase", string(nodeStatus.Phase)).Infoln("phase updated")
node.Phase = nodeStatus.Phase
}
node.Message = nodeStatus.Message
- gc.gw.Status.Nodes[node.ID] = *node
- gc.updated = true
+ gatewayContext.gateway.Status.Nodes[node.ID] = *node
+ gatewayContext.updated = true
return node
}
// getNodeByName returns the node from this gateway for the nodeName
-func (gc *GatewayConfig) getNodeByID(nodeID string) *v1alpha1.NodeStatus {
- node, ok := gc.gw.Status.Nodes[nodeID]
+func (gatewayContext *GatewayContext) getNodeByID(nodeID string) *v1alpha1.NodeStatus {
+ node, ok := gatewayContext.gateway.Status.Nodes[nodeID]
if !ok {
return nil
}
@@ -77,12 +76,14 @@ func (gc *GatewayConfig) getNodeByID(nodeID string) *v1alpha1.NodeStatus {
}
// create a new node
-func (gc *GatewayConfig) initializeNode(nodeID string, nodeName string, messages string) v1alpha1.NodeStatus {
- if gc.gw.Status.Nodes == nil {
- gc.gw.Status.Nodes = make(map[string]v1alpha1.NodeStatus)
+func (gatewayContext *GatewayContext) initializeNode(nodeID string, nodeName string, messages string) v1alpha1.NodeStatus {
+ if gatewayContext.gateway.Status.Nodes == nil {
+ gatewayContext.gateway.Status.Nodes = make(map[string]v1alpha1.NodeStatus)
}
- gc.Log.WithField(common.LabelNodeName, nodeName).Info("node")
- node, ok := gc.gw.Status.Nodes[nodeID]
+
+ gatewayContext.logger.WithField(common.LabelNodeName, nodeName).Infoln("node")
+
+ node, ok := gatewayContext.gateway.Status.Nodes[nodeID]
if !ok {
node = v1alpha1.NodeStatus{
ID: nodeID,
@@ -93,66 +94,68 @@ func (gc *GatewayConfig) initializeNode(nodeID string, nodeName string, messages
}
node.Phase = v1alpha1.NodePhaseRunning
node.Message = messages
- gc.gw.Status.Nodes[nodeID] = node
- gc.Log.WithFields(
+ gatewayContext.gateway.Status.Nodes[nodeID] = node
+
+ gatewayContext.logger.WithFields(
map[string]interface{}{
common.LabelNodeName: nodeName,
"node-message": node.Message,
},
- ).Info("node is running")
- gc.updated = true
+ ).Infoln("node is running")
+
+ gatewayContext.updated = true
return node
}
-// UpdateGatewayResourceState updates gateway resource nodes state
-func (gc *GatewayConfig) UpdateGatewayResourceState(status *EventSourceStatus) {
- log := gc.Log
+// UpdateGatewayState updates gateway resource nodes state
+func (gatewayContext *GatewayContext) UpdateGatewayState(status *EventSourceStatus) {
+ logger := gatewayContext.logger
if status.Phase != v1alpha1.NodePhaseResourceUpdate {
- log = log.WithField(common.LabelEventSource, status.Name).Logger
+ logger = logger.WithField(common.LabelEventSource, status.Name).Logger
}
- log.Info("received a gateway state update notification")
+ logger.Infoln("received a gateway state update notification")
switch status.Phase {
case v1alpha1.NodePhaseRunning:
// init the node and mark it as running
- gc.initializeNode(status.Id, status.Name, status.Message)
+ gatewayContext.initializeNode(status.Id, status.Name, status.Message)
case v1alpha1.NodePhaseCompleted, v1alpha1.NodePhaseError:
- gc.markGatewayNodePhase(status)
+ gatewayContext.markGatewayNodePhase(status)
case v1alpha1.NodePhaseResourceUpdate:
- gc.gw = status.Gw
+ gatewayContext.gateway = status.Gateway
case v1alpha1.NodePhaseRemove:
- delete(gc.gw.Status.Nodes, status.Id)
- log.Info("event source is removed")
- gc.updated = true
+ delete(gatewayContext.gateway.Status.Nodes, status.Id)
+ logger.Infoln("event source is removed")
+ gatewayContext.updated = true
}
- if gc.updated {
+ if gatewayContext.updated {
// persist changes and create K8s event logging the change
eventType := common.StateChangeEventType
labels := map[string]string{
- common.LabelGatewayEventSourceName: status.Name,
- common.LabelGatewayName: gc.Name,
- common.LabelGatewayEventSourceID: status.Id,
- common.LabelOperation: "persist_event_source_state",
+ common.LabelEventSourceName: status.Name,
+ common.LabelResourceName: gatewayContext.name,
+ common.LabelEventSourceID: status.Id,
+ common.LabelOperation: "persist_event_source_state",
}
- updatedGw, err := gtw.PersistUpdates(gc.gwcs, gc.gw, gc.Log)
+ updatedGw, err := gtw.PersistUpdates(gatewayContext.gatewayClient, gatewayContext.gateway, gatewayContext.logger)
if err != nil {
- log.WithError(err).Error("failed to persist gateway resource updates, reverting to old state")
+ logger.WithError(err).Errorln("failed to persist gateway resource updates, reverting to old state")
eventType = common.EscalationEventType
}
// update gateway ref. in case of failure to persist updates, this is a deep copy of old gateway resource
- gc.gw = updatedGw
+ gatewayContext.gateway = updatedGw
labels[common.LabelEventType] = string(eventType)
// generate a K8s event for persist event source state change
- if err := common.GenerateK8sEvent(gc.Clientset, status.Message, eventType, "event source state update", gc.Name, gc.Namespace, gc.controllerInstanceID, gateway.Kind, labels); err != nil {
- log.WithError(err).Error("failed to create K8s event to log event source state change")
+ if err := common.GenerateK8sEvent(gatewayContext.k8sClient, status.Message, eventType, "event source state update", gatewayContext.name, gatewayContext.namespace, gatewayContext.controllerInstanceID, gateway.Kind, labels); err != nil {
+ logger.WithError(err).Errorln("failed to create K8s event to log event source state change")
}
}
- gc.updated = false
+ gatewayContext.updated = false
}
diff --git a/gateways/state_test.go b/gateways/client/state_test.go
similarity index 75%
rename from gateways/state_test.go
rename to gateways/client/state_test.go
index 82fb3545d5..d389582a8d 100644
--- a/gateways/state_test.go
+++ b/gateways/client/state_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package gateways
+package main
import (
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
@@ -23,26 +23,26 @@ import (
)
func TestGatewayState(t *testing.T) {
- gc := getGatewayConfig()
+ gc := getGatewayContext()
convey.Convey("Given a gateway", t, func() {
convey.Convey("Create the gateway", func() {
var err error
- gc.gw, err = gc.gwcs.ArgoprojV1alpha1().Gateways(gc.gw.Namespace).Create(gc.gw)
+ gc.gateway, err = gc.gatewayClient.ArgoprojV1alpha1().Gateways(gc.gateway.Namespace).Create(gc.gateway)
convey.So(err, convey.ShouldBeNil)
})
convey.Convey("Update gateway resource test-node node state to running", func() {
- gc.UpdateGatewayResourceState(&EventSourceStatus{
+ gc.UpdateGatewayState(&EventSourceStatus{
Phase: v1alpha1.NodePhaseRunning,
Name: "test-node",
Message: "node is marked as running",
Id: "test-node",
})
- convey.So(len(gc.gw.Status.Nodes), convey.ShouldEqual, 1)
- convey.So(gc.gw.Status.Nodes["test-node"].Phase, convey.ShouldEqual, v1alpha1.NodePhaseRunning)
+ convey.So(len(gc.gateway.Status.Nodes), convey.ShouldEqual, 1)
+ convey.So(gc.gateway.Status.Nodes["test-node"].Phase, convey.ShouldEqual, v1alpha1.NodePhaseRunning)
})
- updatedGw := gc.gw
+ updatedGw := gc.gateway
updatedGw.Spec.Watchers = &v1alpha1.NotificationWatchers{
Sensors: []v1alpha1.SensorNotificationWatcher{
{
@@ -52,49 +52,49 @@ func TestGatewayState(t *testing.T) {
}
convey.Convey("Update gateway watchers", func() {
- gc.UpdateGatewayResourceState(&EventSourceStatus{
+ gc.UpdateGatewayState(&EventSourceStatus{
Phase: v1alpha1.NodePhaseResourceUpdate,
Name: "test-node",
Message: "gateway resource is updated",
Id: "test-node",
- Gw: updatedGw,
+ Gateway: updatedGw,
})
- convey.So(len(gc.gw.Spec.Watchers.Sensors), convey.ShouldEqual, 1)
+ convey.So(len(gc.gateway.Spec.Watchers.Sensors), convey.ShouldEqual, 1)
})
convey.Convey("Update gateway resource test-node node state to completed", func() {
- gc.UpdateGatewayResourceState(&EventSourceStatus{
+ gc.UpdateGatewayState(&EventSourceStatus{
Phase: v1alpha1.NodePhaseCompleted,
Name: "test-node",
Message: "node is marked completed",
Id: "test-node",
})
- convey.So(gc.gw.Status.Nodes["test-node"].Phase, convey.ShouldEqual, v1alpha1.NodePhaseCompleted)
+ convey.So(gc.gateway.Status.Nodes["test-node"].Phase, convey.ShouldEqual, v1alpha1.NodePhaseCompleted)
})
convey.Convey("Remove gateway resource test-node node", func() {
- gc.UpdateGatewayResourceState(&EventSourceStatus{
+ gc.UpdateGatewayState(&EventSourceStatus{
Phase: v1alpha1.NodePhaseRemove,
Name: "test-node",
Message: "node is removed",
Id: "test-node",
})
- convey.So(len(gc.gw.Status.Nodes), convey.ShouldEqual, 0)
+ convey.So(len(gc.gateway.Status.Nodes), convey.ShouldEqual, 0)
})
})
}
func TestMarkGatewayNodePhase(t *testing.T) {
convey.Convey("Given a node status, mark node state", t, func() {
- gc := getGatewayConfig()
+ gc := getGatewayContext()
nodeStatus := &EventSourceStatus{
Name: "fake",
Id: "1234",
Message: "running",
Phase: v1alpha1.NodePhaseRunning,
- Gw: gc.gw,
+ Gateway: gc.gateway,
}
- gc.gw.Status.Nodes = map[string]v1alpha1.NodeStatus{
+ gc.gateway.Status.Nodes = map[string]v1alpha1.NodeStatus{
"1234": v1alpha1.NodeStatus{
Phase: v1alpha1.NodePhaseNew,
Message: "init",
@@ -107,7 +107,7 @@ func TestMarkGatewayNodePhase(t *testing.T) {
convey.So(resultStatus, convey.ShouldNotBeNil)
convey.So(resultStatus.Name, convey.ShouldEqual, nodeStatus.Name)
- gc.gw.Status.Nodes = map[string]v1alpha1.NodeStatus{
+ gc.gateway.Status.Nodes = map[string]v1alpha1.NodeStatus{
"4567": v1alpha1.NodeStatus{
Phase: v1alpha1.NodePhaseNew,
Message: "init",
@@ -123,8 +123,8 @@ func TestMarkGatewayNodePhase(t *testing.T) {
func TestGetNodeByID(t *testing.T) {
convey.Convey("Given a node id, retrieve the node", t, func() {
- gc := getGatewayConfig()
- gc.gw.Status.Nodes = map[string]v1alpha1.NodeStatus{
+ gc := getGatewayContext()
+ gc.gateway.Status.Nodes = map[string]v1alpha1.NodeStatus{
"1234": v1alpha1.NodeStatus{
Phase: v1alpha1.NodePhaseNew,
Message: "init",
@@ -140,12 +140,12 @@ func TestGetNodeByID(t *testing.T) {
func TestInitializeNode(t *testing.T) {
convey.Convey("Given a node, initialize it", t, func() {
- gc := getGatewayConfig()
+ gc := getGatewayContext()
status := gc.initializeNode("1234", "fake", "init")
convey.So(status, convey.ShouldNotBeNil)
convey.So(status.ID, convey.ShouldEqual, "1234")
convey.So(status.Name, convey.ShouldEqual, "fake")
convey.So(status.Message, convey.ShouldEqual, "init")
- convey.So(len(gc.gw.Status.Nodes), convey.ShouldEqual, 1)
+ convey.So(len(gc.gateway.Status.Nodes), convey.ShouldEqual, 1)
})
}
diff --git a/gateways/client/transformer.go b/gateways/client/transformer.go
new file mode 100644
index 0000000000..6bb806f5d0
--- /dev/null
+++ b/gateways/client/transformer.go
@@ -0,0 +1,165 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ pc "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/google/uuid"
+ "github.com/pkg/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// DispatchEvent converts the event into a CloudEvent and dispatches it to the gateway's watchers
+func (gatewayContext *GatewayContext) DispatchEvent(gatewayEvent *gateways.Event) error {
+ transformedEvent, err := gatewayContext.transformEvent(gatewayEvent)
+ if err != nil {
+ return err
+ }
+
+ payload, err := json.Marshal(transformedEvent)
+ if err != nil {
+ return errors.Errorf("failed to dispatch event to watchers over http. marshalling failed. err: %+v", err)
+ }
+
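+ // route the event payload based on the gateway's configured event protocol (HTTP or NATS)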
+ switch gatewayContext.gateway.Spec.EventProtocol.Type {
+ case pc.HTTP:
+ if err = gatewayContext.dispatchEventOverHttp(transformedEvent.Context.Source.Host, payload); err != nil {
+ return err
+ }
+ case pc.NATS:
+ if err = gatewayContext.dispatchEventOverNats(transformedEvent.Context.Source.Host, payload); err != nil {
+ return err
+ }
+ default:
+ return errors.Errorf("unknown dispatch mechanism %s", gatewayContext.gateway.Spec.EventProtocol.Type)
+ }
+ return nil
+}
+
+// transformEvent transforms an event from an event source into a CloudEvents specification compliant event
+// See https://github.com/cloudevents/spec for more info.
+func (gatewayContext *GatewayContext) transformEvent(gatewayEvent *gateways.Event) (*apicommon.Event, error) {
+ logger := gatewayContext.logger.WithField(common.LabelEventSource, gatewayEvent.Name)
+
+ logger.Infoln("converting gateway event into cloudevents specification compliant event")
+
+ // Create a CloudEvent
+ ce := &apicommon.Event{
+ Context: apicommon.EventContext{
+ CloudEventsVersion: common.CloudEventsVersion,
+ EventID: fmt.Sprintf("%x", uuid.New()),
+ ContentType: "application/json",
+ EventTime: metav1.MicroTime{Time: time.Now().UTC()},
+ EventType: string(gatewayContext.gateway.Spec.Type),
+ Source: &apicommon.URI{
+ Host: common.DefaultEventSourceName(gatewayContext.gateway.Name, gatewayEvent.Name),
+ },
+ },
+ Payload: gatewayEvent.Payload,
+ }
+
+ logger.Infoln("event has been transformed into cloud event")
+ return ce, nil
+}
+
+// dispatchEventOverHttp dispatches the event payload to watchers over HTTP.
+func (gatewayContext *GatewayContext) dispatchEventOverHttp(source string, eventPayload []byte) error {
+ gatewayContext.logger.WithField(common.LabelEventSource, source).Infoln("dispatching event to watchers")
+
+ completeSuccess := true
+
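+ // post the event to each sensor watcher; a watcher's namespace, when set, overrides the gateway namespace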
+ for _, sensor := range gatewayContext.gateway.Spec.Watchers.Sensors {
+ namespace := gatewayContext.namespace
+ if sensor.Namespace != "" {
+ namespace = sensor.Namespace
+ }
+ if err := gatewayContext.postCloudEventToWatcher(common.ServiceDNSName(sensor.Name, namespace), gatewayContext.gateway.Spec.EventProtocol.Http.Port, common.SensorServiceEndpoint, eventPayload); err != nil {
+ gatewayContext.logger.WithField(common.LabelSensorName, sensor.Name).WithError(err).Warnln("failed to dispatch event to sensor watcher over http. communication error")
+ completeSuccess = false
+ }
+ }
+ for _, gateway := range gatewayContext.gateway.Spec.Watchers.Gateways {
+ namespace := gatewayContext.namespace
+ if gateway.Namespace != "" {
+ namespace = gateway.Namespace
+ }
+ if err := gatewayContext.postCloudEventToWatcher(common.ServiceDNSName(gateway.Name, namespace), gateway.Port, gateway.Endpoint, eventPayload); err != nil {
+ gatewayContext.logger.WithField(common.LabelResourceName, gateway.Name).WithError(err).Warnln("failed to dispatch event to gateway watcher over http. communication error")
+ completeSuccess = false
+ }
+ }
+
+ response := "dispatched event to all watchers"
+ if !completeSuccess {
+ response = fmt.Sprintf("%s, although some of the dispatch operations failed; check logs for more info", response)
+ }
+
+ gatewayContext.logger.Infoln(response)
+ return nil
+}
+
+// dispatchEventOverNats dispatches the event payload over NATS
+func (gatewayContext *GatewayContext) dispatchEventOverNats(source string, eventPayload []byte) error {
+ var err error
+
+ switch gatewayContext.gateway.Spec.EventProtocol.Nats.Type {
+ case pc.Standard:
+ err = gatewayContext.natsConn.Publish(source, eventPayload)
+ case pc.Streaming:
+ err = gatewayContext.natsStreamingConn.Publish(source, eventPayload)
+ }
+
+ if err != nil {
+ gatewayContext.logger.WithField(common.LabelEventSource, source).WithError(err).Errorln("failed to publish event")
+ return err
+ }
+
+ gatewayContext.logger.WithField(common.LabelEventSource, source).Infoln("event published successfully")
+ return nil
+}
+
+// postCloudEventToWatcher makes an HTTP POST call to the watcher's service
+func (gatewayContext *GatewayContext) postCloudEventToWatcher(host string, port string, endpoint string, payload []byte) error {
+ req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:%s%s", host, port, endpoint), bytes.NewBuffer(payload))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+
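+ // use a client with a request timeout and keep-alive connection pooling for calls to watchers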
+ client := &http.Client{
+ Timeout: 20 * time.Second,
+ Transport: &http.Transport{
+ Dial: (&net.Dialer{
+ KeepAlive: 600 * time.Second,
+ }).Dial,
+ MaxIdleConns: 100,
+ MaxIdleConnsPerHost: 50,
+ },
+ }
+ _, err = client.Do(req)
+ return err
+}
diff --git a/gateways/transformer_test.go b/gateways/client/transformer_test.go
similarity index 82%
rename from gateways/transformer_test.go
rename to gateways/client/transformer_test.go
index e0bd2f761d..843dc0b37c 100644
--- a/gateways/transformer_test.go
+++ b/gateways/client/transformer_test.go
@@ -14,14 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package gateways
+package main
import (
"fmt"
+ "testing"
+
+ "github.com/argoproj/argo-events/gateways"
"github.com/argoproj/argo-events/pkg/apis/common"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/mock"
- "testing"
)
type MyMockedObject struct {
@@ -38,8 +40,8 @@ func (m *MyMockedObject) dispatchEventOverNats(source string, eventPayload []byt
func TestDispatchEvent(t *testing.T) {
convey.Convey("Given an event, dispatch it to sensor", t, func() {
- gc := getGatewayConfig()
- event := &Event{
+ gc := getGatewayContext()
+ event := &gateways.Event{
Name: "fake",
Payload: []byte("fake"),
}
@@ -50,17 +52,17 @@ func TestDispatchEvent(t *testing.T) {
err := gc.DispatchEvent(event)
convey.So(err, convey.ShouldBeNil)
- gc.gw.Spec.EventProtocol.Type = common.NATS
+ gc.gateway.Spec.EventProtocol.Type = common.NATS
err = gc.DispatchEvent(event)
convey.So(err, convey.ShouldBeNil)
- gc.gw.Spec.EventProtocol.Type = common.NATS
+ gc.gateway.Spec.EventProtocol.Type = common.NATS
err = gc.DispatchEvent(event)
convey.So(err, convey.ShouldBeNil)
- gc.gw.Spec.EventProtocol.Type = common.EventProtocolType("fake")
+ gc.gateway.Spec.EventProtocol.Type = common.EventProtocolType("fake")
err = gc.DispatchEvent(event)
convey.So(err, convey.ShouldNotBeNil)
})
@@ -68,13 +70,13 @@ func TestDispatchEvent(t *testing.T) {
func TestTransformEvent(t *testing.T) {
convey.Convey("Given a gateway event, convert it into cloud event", t, func() {
- gc := getGatewayConfig()
- ce, err := gc.transformEvent(&Event{
+ gc := getGatewayContext()
+ ce, err := gc.transformEvent(&gateways.Event{
Name: "fake",
Payload: []byte("fake"),
})
convey.So(err, convey.ShouldBeNil)
convey.So(ce, convey.ShouldNotBeNil)
- convey.So(ce.Context.Source.Host, convey.ShouldEqual, fmt.Sprintf("%s:%s", gc.gw.Name, "fake"))
+ convey.So(ce.Context.Source.Host, convey.ShouldEqual, fmt.Sprintf("%s:%s", gc.gateway.Name, "fake"))
})
}
diff --git a/gateways/watcher.go b/gateways/client/watcher.go
similarity index 54%
rename from gateways/watcher.go
rename to gateways/client/watcher.go
index 253c8c0b27..0b3bfba35a 100644
--- a/gateways/watcher.go
+++ b/gateways/client/watcher.go
@@ -14,16 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package gateways
+package main
import (
"context"
"fmt"
- "github.com/argoproj/argo-events/common"
- "strings"
+ "github.com/argoproj/argo-events/common"
+ eventSourceV1Alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
@@ -31,37 +30,29 @@ import (
"k8s.io/client-go/tools/cache"
)
-// WatchGatewayEventSources watches change in configuration for the gateway
-func (gc *GatewayConfig) WatchGatewayEventSources(ctx context.Context) (cache.Controller, error) {
- source := gc.newConfigMapWatch(gc.configName)
+// WatchGatewayEventSources watches change in event source for the gateway
+func (gatewayContext *GatewayContext) WatchGatewayEventSources(ctx context.Context) (cache.Controller, error) {
+ source := gatewayContext.newEventSourceWatch(gatewayContext.eventSourceRef)
_, controller := cache.NewInformer(
source,
- &corev1.ConfigMap{},
+ &eventSourceV1Alpha1.EventSource{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
- if newCm, ok := obj.(*corev1.ConfigMap); ok {
- if err := common.CheckEventSourceVersion(newCm); err != nil {
- gc.Log.WithField("name", newCm.Name).Error(err)
- } else {
- gc.Log.WithField("name", newCm.Name).Info("detected configmap addition")
- err := gc.manageEventSources(newCm)
- if err != nil {
- gc.Log.WithError(err).Error("add config failed")
- }
+ if newEventSource, ok := obj.(*eventSourceV1Alpha1.EventSource); ok {
+ gatewayContext.logger.WithField(common.LabelEventSource, newEventSource.Name).Infoln("detected a new event-source...")
+ err := gatewayContext.syncEventSources(newEventSource)
+ if err != nil {
+ gatewayContext.logger.WithField(common.LabelEventSource, newEventSource.Name).WithError(err).Errorln("failed to process the event-source reference")
}
}
},
UpdateFunc: func(old, new interface{}) {
- if cm, ok := new.(*corev1.ConfigMap); ok {
- if err := common.CheckEventSourceVersion(cm); err != nil {
- gc.Log.WithField("name", cm.Name).Error(err)
- } else {
- gc.Log.Info("detected EventSource update. Updating the controller run config.")
- err := gc.manageEventSources(cm)
- if err != nil {
- gc.Log.WithError(err).Error("update config failed")
- }
+ if eventSource, ok := new.(*eventSourceV1Alpha1.EventSource); ok {
+ gatewayContext.logger.WithField(common.LabelEventSource, eventSource.Name).Info("detected event-source update...")
+ err := gatewayContext.syncEventSources(eventSource)
+ if err != nil {
+ gatewayContext.logger.WithField(common.LabelEventSource, eventSource.Name).WithError(err).Error("failed to process event source update")
}
}
},
@@ -71,22 +62,21 @@ func (gc *GatewayConfig) WatchGatewayEventSources(ctx context.Context) (cache.Co
return controller, nil
}
-// newConfigMapWatch creates a new configmap watcher
-func (gc *GatewayConfig) newConfigMapWatch(name string) *cache.ListWatch {
- x := gc.Clientset.CoreV1().RESTClient()
- resource := "configmaps"
- namespace := gc.Namespace
- if strings.Contains(name, "/") {
- parts := strings.SplitN(name, "/", 2)
- namespace = parts[0]
- name = parts[1]
+// newEventSourceWatch creates a new event source watcher
+func (gatewayContext *GatewayContext) newEventSourceWatch(eventSourceRef *v1alpha1.EventSourceRef) *cache.ListWatch {
+ client := gatewayContext.eventSourceClient.ArgoprojV1alpha1().RESTClient()
+ resource := "eventsources"
+
+ if eventSourceRef.Namespace == "" {
+ eventSourceRef.Namespace = gatewayContext.namespace
}
- fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", name))
+
+ fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", eventSourceRef.Name))
listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector.String()
- req := x.Get().
- Namespace(namespace).
+ req := client.Get().
+ Namespace(eventSourceRef.Namespace).
Resource(resource).
VersionedParams(&options, metav1.ParameterCodec)
return req.Do().Get()
@@ -94,8 +84,8 @@ func (gc *GatewayConfig) newConfigMapWatch(name string) *cache.ListWatch {
watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
options.Watch = true
options.FieldSelector = fieldSelector.String()
- req := x.Get().
- Namespace(namespace).
+ req := client.Get().
+ Namespace(eventSourceRef.Namespace).
Resource(resource).
VersionedParams(&options, metav1.ParameterCodec)
return req.Watch()
@@ -103,10 +93,9 @@ func (gc *GatewayConfig) newConfigMapWatch(name string) *cache.ListWatch {
return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}
-// WatchGateway watches for changes in the gateway resource
-// This will act as replacement for old gateway-transformer-configmap. Changes to watchers, event version and event type will be reflected.
-func (gc *GatewayConfig) WatchGateway(ctx context.Context) (cache.Controller, error) {
- source := gc.newGatewayWatch(gc.Name)
+// WatchGatewayUpdates watches for changes in the gateway resource
+func (gatewayContext *GatewayContext) WatchGatewayUpdates(ctx context.Context) (cache.Controller, error) {
+ source := gatewayContext.newGatewayWatch(gatewayContext.name)
_, controller := cache.NewInformer(
source,
&v1alpha1.Gateway{},
@@ -114,10 +103,10 @@ func (gc *GatewayConfig) WatchGateway(ctx context.Context) (cache.Controller, er
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, new interface{}) {
if g, ok := new.(*v1alpha1.Gateway); ok {
- gc.Log.Info("detected gateway update. updating gateway watchers")
- gc.StatusCh <- EventSourceStatus{
+ gatewayContext.logger.Info("detected gateway update. updating gateway watchers")
+ gatewayContext.statusCh <- EventSourceStatus{
Phase: v1alpha1.NodePhaseResourceUpdate,
- Gw: g,
+ Gateway: g,
Message: "gateway_resource_update",
}
}
@@ -129,15 +118,15 @@ func (gc *GatewayConfig) WatchGateway(ctx context.Context) (cache.Controller, er
}
// newGatewayWatch creates a new gateway watcher
-func (gc *GatewayConfig) newGatewayWatch(name string) *cache.ListWatch {
- x := gc.gwcs.ArgoprojV1alpha1().RESTClient()
+func (gatewayContext *GatewayContext) newGatewayWatch(name string) *cache.ListWatch {
+ x := gatewayContext.gatewayClient.ArgoprojV1alpha1().RESTClient()
resource := "gateways"
fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", name))
listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector.String()
req := x.Get().
- Namespace(gc.Namespace).
+ Namespace(gatewayContext.namespace).
Resource(resource).
VersionedParams(&options, metav1.ParameterCodec)
return req.Do().Get()
@@ -146,7 +135,7 @@ func (gc *GatewayConfig) newGatewayWatch(name string) *cache.ListWatch {
options.Watch = true
options.FieldSelector = fieldSelector.String()
req := x.Get().
- Namespace(gc.Namespace).
+ Namespace(gatewayContext.namespace).
Resource(resource).
VersionedParams(&options, metav1.ParameterCodec)
return req.Watch()
diff --git a/gateways/common.go b/gateways/common.go
new file mode 100644
index 0000000000..2b39dd84f4
--- /dev/null
+++ b/gateways/common.go
@@ -0,0 +1,14 @@
+package gateways
+
+// Gateway constants
+const (
+ // LabelEventSourceName is the label for an event source in a gateway
+ LabelEventSourceName = "event-source-name"
+ // LabelEventSourceID is the label for an event source ID
+ LabelEventSourceID = "event-source-id"
+ EnvVarGatewayServerPort = "GATEWAY_SERVER_PORT" // env var that holds the gateway server port
+ // ServerConnTimeout is the server connection timeout in seconds
+ ServerConnTimeout = 10
+ // EventSourceDir is the relative path to the example event source definitions
+ EventSourceDir = "../../../examples/event-sources"
+)
diff --git a/gateways/common/fake.go b/gateways/common/fake.go
deleted file mode 100644
index e0f0db6aba..0000000000
--- a/gateways/common/fake.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package common
-
-import (
- "context"
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "google.golang.org/grpc/metadata"
- "net/http"
-)
-
-var Hook = &Webhook{
- Endpoint: "/fake",
- Port: "12000",
- URL: "test-url",
-}
-
-type FakeHttpWriter struct {
- HeaderStatus int
- Payload []byte
-}
-
-func (f *FakeHttpWriter) Header() http.Header {
- return http.Header{}
-}
-
-func (f *FakeHttpWriter) Write(body []byte) (int, error) {
- f.Payload = body
- return len(body), nil
-}
-
-func (f *FakeHttpWriter) WriteHeader(status int) {
- f.HeaderStatus = status
-}
-
-type FakeRouteConfig struct {
- route *Route
-}
-
-func (f *FakeRouteConfig) GetRoute() *Route {
- return f.route
-}
-
-func (f *FakeRouteConfig) RouteHandler(writer http.ResponseWriter, request *http.Request) {
-}
-
-func (f *FakeRouteConfig) PostStart() error {
- return nil
-}
-
-func (f *FakeRouteConfig) PostStop() error {
- return nil
-}
-
-func GetFakeRoute() *Route {
- logger := common.NewArgoEventsLogger()
- return &Route{
- Webhook: Hook,
- EventSource: &gateways.EventSource{
- Name: "fake-event-source",
- Data: "hello",
- Id: "123",
- },
- Logger: logger,
- StartCh: make(chan struct{}),
- }
-}
-
-type FakeGRPCStream struct {
- SentData *gateways.Event
- Ctx context.Context
-}
-
-func (f *FakeGRPCStream) Send(event *gateways.Event) error {
- f.SentData = event
- return nil
-}
-
-func (f *FakeGRPCStream) SetHeader(metadata.MD) error {
- return nil
-}
-
-func (f *FakeGRPCStream) SendHeader(metadata.MD) error {
- return nil
-}
-
-func (f *FakeGRPCStream) SetTrailer(metadata.MD) {
- return
-}
-
-func (f *FakeGRPCStream) Context() context.Context {
- return f.Ctx
-}
-
-func (f *FakeGRPCStream) SendMsg(m interface{}) error {
- return nil
-}
-
-func (f *FakeGRPCStream) RecvMsg(m interface{}) error {
- return nil
-}
diff --git a/gateways/common/validate.go b/gateways/common/validate.go
deleted file mode 100644
index feb662c40a..0000000000
--- a/gateways/common/validate.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package common
-
-import (
- "fmt"
- "github.com/argoproj/argo-events/gateways"
-)
-
-const EventSourceDir = "../../../examples/event-sources"
-
-var (
- ErrNilEventSource = fmt.Errorf("event source can't be nil")
-)
-
-func ValidateGatewayEventSource(eventSource *gateways.EventSource, version string, parseEventSource func(string) (interface{}, error), validateEventSource func(interface{}) error) (*gateways.ValidEventSource, error) {
- v := &gateways.ValidEventSource{}
- if eventSource.Version != version {
- v.Reason = fmt.Sprintf("event source version mismatch. gateway expects %s version, and provided version is %s", version, eventSource.Version)
- return v, nil
- }
- es, err := parseEventSource(eventSource.Data)
- if err != nil {
- v.Reason = fmt.Sprintf("failed to parse event source. err: %+v", err)
- return v, nil
- }
- if err := validateEventSource(es); err != nil {
- v.Reason = fmt.Sprintf("failed to validate event source. err: %+v", err)
- return v, nil
- }
- v.IsValid = true
- return v, nil
-}
diff --git a/gateways/common/webhook.go b/gateways/common/webhook.go
deleted file mode 100644
index c59e26a752..0000000000
--- a/gateways/common/webhook.go
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package common
-
-import (
- "fmt"
- "github.com/argoproj/argo-events/common"
- "github.com/sirupsen/logrus"
- "net/http"
- "strconv"
- "strings"
- "sync"
-
- "github.com/argoproj/argo-events/gateways"
-)
-
-// Webhook is a general purpose REST API
-type Webhook struct {
- // REST API endpoint
- Endpoint string `json:"endpoint" protobuf:"bytes,1,name=endpoint"`
- // Method is HTTP request method that indicates the desired action to be performed for a given resource.
- // See RFC7231 Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content
- Method string `json:"method" protobuf:"bytes,2,name=method"`
- // Port on which HTTP server is listening for incoming events.
- Port string `json:"port" protobuf:"bytes,3,name=port"`
- // URL is the url of the server.
- URL string `json:"url" protobuf:"bytes,4,name=url"`
- // ServerCertPath refers the file that contains the cert.
- ServerCertPath string `json:"serverCertPath,omitempty" protobuf:"bytes,4,opt,name=serverCertPath"`
- // ServerKeyPath refers the file that contains private key
- ServerKeyPath string `json:"serverKeyPath,omitempty" protobuf:"bytes,5,opt,name=serverKeyPath"`
-
- // srv holds reference to http server
- srv *http.Server
- mux *http.ServeMux
-}
-
-// WebhookHelper is a helper struct
-type WebhookHelper struct {
- // Mutex synchronizes ActiveServers
- Mutex sync.Mutex
- // ActiveServers keeps track of currently running http servers.
- ActiveServers map[string]*activeServer
- // ActiveEndpoints keep track of endpoints that are already registered with server and their status active or inactive
- ActiveEndpoints map[string]*Endpoint
- // RouteActivateChan handles assigning new route to server.
- RouteActivateChan chan RouteManager
- // RouteDeactivateChan handles deactivating existing route
- RouteDeactivateChan chan RouteManager
-}
-
-// HTTP Muxer
-type server struct {
- mux *http.ServeMux
-}
-
-// activeServer contains reference to server and an error channel that is shared across all functions registering endpoints for the server.
-type activeServer struct {
- srv *http.ServeMux
- errChan chan error
-}
-
-// Route contains common information for a route
-type Route struct {
- Webhook *Webhook
- Logger *logrus.Logger
- StartCh chan struct{}
- EventSource *gateways.EventSource
-}
-
-// RouteManager is an interface to manage the configuration for a route
-type RouteManager interface {
- GetRoute() *Route
- RouteHandler(writer http.ResponseWriter, request *http.Request)
- PostStart() error
- PostStop() error
-}
-
-// endpoint contains state of an http endpoint
-type Endpoint struct {
- // whether endpoint is active
- Active bool
- // data channel to receive data on this endpoint
- DataCh chan []byte
-}
-
-// NewWebhookHelper returns new Webhook helper
-func NewWebhookHelper() *WebhookHelper {
- return &WebhookHelper{
- ActiveEndpoints: make(map[string]*Endpoint),
- ActiveServers: make(map[string]*activeServer),
- Mutex: sync.Mutex{},
- RouteActivateChan: make(chan RouteManager),
- RouteDeactivateChan: make(chan RouteManager),
- }
-}
-
-// InitRouteChannels initializes route channels so they can activate and deactivate routes.
-func InitRouteChannels(helper *WebhookHelper) {
- for {
- select {
- case config := <-helper.RouteActivateChan:
- // start server if it has not been started on this port
- startHttpServer(config, helper)
- startCh := config.GetRoute().StartCh
- startCh <- struct{}{}
-
- case config := <-helper.RouteDeactivateChan:
- webhook := config.GetRoute().Webhook
- _, ok := helper.ActiveServers[webhook.Port]
- if ok {
- helper.ActiveEndpoints[webhook.Endpoint].Active = false
- }
- }
- }
-}
-
-// ServeHTTP implementation
-func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- s.mux.ServeHTTP(w, r)
-}
-
-// starts a http server
-func startHttpServer(routeManager RouteManager, helper *WebhookHelper) {
- // start a http server only if no other configuration previously started the server on given port
- helper.Mutex.Lock()
- r := routeManager.GetRoute()
- if _, ok := helper.ActiveServers[r.Webhook.Port]; !ok {
- s := &server{
- mux: http.NewServeMux(),
- }
- r.Webhook.mux = s.mux
- r.Webhook.srv = &http.Server{
- Addr: ":" + fmt.Sprintf("%s", r.Webhook.Port),
- Handler: s,
- }
- errChan := make(chan error, 1)
- helper.ActiveServers[r.Webhook.Port] = &activeServer{
- srv: s.mux,
- errChan: errChan,
- }
-
- // start http server
- go func() {
- var err error
- if r.Webhook.ServerCertPath == "" || r.Webhook.ServerKeyPath == "" {
- err = r.Webhook.srv.ListenAndServe()
- } else {
- err = r.Webhook.srv.ListenAndServeTLS(r.Webhook.ServerCertPath, r.Webhook.ServerKeyPath)
- }
- r.Logger.WithField(common.LabelEventSource, r.EventSource.Name).WithError(err).Error("http server stopped")
- if err != nil {
- errChan <- err
- }
- }()
- }
- helper.Mutex.Unlock()
-}
-
-// activateRoute activates route
-func activateRoute(routeManager RouteManager, helper *WebhookHelper) {
- r := routeManager.GetRoute()
- helper.RouteActivateChan <- routeManager
-
- <-r.StartCh
-
- if r.Webhook.mux == nil {
- helper.Mutex.Lock()
- r.Webhook.mux = helper.ActiveServers[r.Webhook.Port].srv
- helper.Mutex.Unlock()
- }
-
- log := r.Logger.WithFields(
- map[string]interface{}{
- common.LabelEventSource: r.EventSource.Name,
- common.LabelPort: r.Webhook.Port,
- common.LabelEndpoint: r.Webhook.Endpoint,
- })
-
- log.Info("adding route handler")
- if _, ok := helper.ActiveEndpoints[r.Webhook.Endpoint]; !ok {
- helper.ActiveEndpoints[r.Webhook.Endpoint] = &Endpoint{
- Active: true,
- DataCh: make(chan []byte),
- }
- r.Webhook.mux.HandleFunc(r.Webhook.Endpoint, routeManager.RouteHandler)
- }
- helper.ActiveEndpoints[r.Webhook.Endpoint].Active = true
-
- log.Info("route handler added")
-}
-
-func processChannels(routeManager RouteManager, helper *WebhookHelper, eventStream gateways.Eventing_StartEventSourceServer) error {
- r := routeManager.GetRoute()
-
- for {
- select {
- case data := <-helper.ActiveEndpoints[r.Webhook.Endpoint].DataCh:
- r.Logger.WithField(common.LabelEventSource, r.EventSource.Name).Info("new event received, dispatching to gateway client")
- err := eventStream.Send(&gateways.Event{
- Name: r.EventSource.Name,
- Payload: data,
- })
- if err != nil {
- r.Logger.WithField(common.LabelEventSource, r.EventSource.Name).WithError(err).Error("failed to send event")
- return err
- }
-
- case <-eventStream.Context().Done():
- r.Logger.WithField(common.LabelEventSource, r.EventSource.Name).Info("connection is closed by client")
- helper.RouteDeactivateChan <- routeManager
- return nil
-
- // this error indicates that the server has stopped running
- case err := <-helper.ActiveServers[r.Webhook.Port].errChan:
- return err
- }
- }
-}
-
-func ProcessRoute(routeManager RouteManager, helper *WebhookHelper, eventStream gateways.Eventing_StartEventSourceServer) error {
- r := routeManager.GetRoute()
- log := r.Logger.WithField(common.LabelEventSource, r.EventSource.Name)
-
- log.Info("validating the route")
- if err := validateRoute(routeManager.GetRoute()); err != nil {
- log.WithError(err).Error("error occurred validating route")
- return err
- }
-
- log.Info("activating the route")
- activateRoute(routeManager, helper)
-
- log.Info("running post start")
- if err := routeManager.PostStart(); err != nil {
- log.WithError(err).Error("error occurred in post start")
- return err
- }
-
- log.Info("processing channels")
- if err := processChannels(routeManager, helper, eventStream); err != nil {
- log.WithError(err).Error("error occurred in process channel")
- return err
- }
-
- log.Info("running post stop")
- if err := routeManager.PostStop(); err != nil {
- log.WithError(err).Error("error occurred in post stop")
- }
- return nil
-}
-
-func ValidateWebhook(w *Webhook) error {
- if w == nil {
- return fmt.Errorf("")
- }
- if w.Endpoint == "" {
- return fmt.Errorf("endpoint can't be empty")
- }
- if w.Port == "" {
- return fmt.Errorf("port can't be empty")
- }
- if w.Port != "" {
- _, err := strconv.Atoi(w.Port)
- if err != nil {
- return fmt.Errorf("failed to parse server port %s. err: %+v", w.Port, err)
- }
- }
- return nil
-}
-
-func validateRoute(r *Route) error {
- if r == nil {
- return fmt.Errorf("route can't be nil")
- }
- if r.Webhook == nil {
- return fmt.Errorf("webhook can't be nil")
- }
- if r.StartCh == nil {
- return fmt.Errorf("start channel can't be nil")
- }
- if r.EventSource == nil {
- return fmt.Errorf("event source can't be nil")
- }
- if r.Logger == nil {
- return fmt.Errorf("logger can't be nil")
- }
- return nil
-}
-
-func FormatWebhookEndpoint(endpoint string) string {
- if !strings.HasPrefix(endpoint, "/") {
- return fmt.Sprintf("/%s", endpoint)
- }
- return endpoint
-}
-
-func GenerateFormattedURL(w *Webhook) string {
- return fmt.Sprintf("%s%s", w.URL, FormatWebhookEndpoint(w.Endpoint))
-}
diff --git a/gateways/common/webhook_test.go b/gateways/common/webhook_test.go
deleted file mode 100644
index 931a0c86b6..0000000000
--- a/gateways/common/webhook_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package common
-
-import (
- "context"
- "fmt"
- "net/http"
- "testing"
- "time"
-
- "github.com/smartystreets/goconvey/convey"
-)
-
-var rc = &FakeRouteConfig{
- route: GetFakeRoute(),
-}
-
-func TestProcessRoute(t *testing.T) {
- convey.Convey("Given a route configuration", t, func() {
- convey.Convey("Activate the route configuration", func() {
-
- rc.route.Webhook.mux = http.NewServeMux()
-
- ctx, cancel := context.WithCancel(context.Background())
- fgs := &FakeGRPCStream{
- Ctx: ctx,
- }
-
- helper := NewWebhookHelper()
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint] = &Endpoint{
- DataCh: make(chan []byte),
- }
- helper.ActiveServers[rc.route.Webhook.Port] = &activeServer{
- errChan: make(chan error),
- }
-
- errCh := make(chan error)
- go func() {
- <-helper.RouteDeactivateChan
- }()
-
- go func() {
- <-helper.RouteActivateChan
- }()
- go func() {
- rc.route.StartCh <- struct{}{}
- }()
- go func() {
- time.Sleep(3 * time.Second)
- cancel()
- }()
-
- go func() {
- errCh <- ProcessRoute(rc, helper, fgs)
- }()
-
- err := <-errCh
- convey.So(err, convey.ShouldBeNil)
- })
- })
-}
-
-func TestProcessRouteChannels(t *testing.T) {
- convey.Convey("Given a route configuration", t, func() {
- convey.Convey("Stop server stream", func() {
- ctx, cancel := context.WithCancel(context.Background())
- fgs := &FakeGRPCStream{
- Ctx: ctx,
- }
- helper := NewWebhookHelper()
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint] = &Endpoint{
- DataCh: make(chan []byte),
- }
- helper.ActiveServers[rc.route.Webhook.Port] = &activeServer{
- errChan: make(chan error),
- }
- errCh := make(chan error)
- go func() {
- <-helper.RouteDeactivateChan
- }()
- go func() {
- errCh <- processChannels(rc, helper, fgs)
- }()
- cancel()
- err := <-errCh
- convey.So(err, convey.ShouldBeNil)
- })
- convey.Convey("Handle error", func() {
- fgs := &FakeGRPCStream{
- Ctx: context.Background(),
- }
- helper := NewWebhookHelper()
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint] = &Endpoint{
- DataCh: make(chan []byte),
- }
- helper.ActiveServers[rc.route.Webhook.Port] = &activeServer{
- errChan: make(chan error),
- }
- errCh := make(chan error)
- err := fmt.Errorf("error")
- go func() {
- helper.ActiveServers[rc.route.Webhook.Port].errChan <- err
- }()
- go func() {
- errCh <- processChannels(rc, helper, fgs)
- }()
- newErr := <-errCh
- convey.So(newErr.Error(), convey.ShouldEqual, err.Error())
- })
- })
-}
-
-func TestFormatWebhookEndpoint(t *testing.T) {
- convey.Convey("Given a webhook endpoint, format it", t, func() {
- convey.So(FormatWebhookEndpoint("hello"), convey.ShouldEqual, "/hello")
- })
-}
-
-func TestValidateWebhook(t *testing.T) {
- convey.Convey("Given a webhook, validate it", t, func() {
- convey.So(ValidateWebhook(Hook), convey.ShouldBeNil)
- })
-}
-
-func TestGenerateFormattedURL(t *testing.T) {
- convey.Convey("Given a webhook, generate formatted URL", t, func() {
- convey.So(GenerateFormattedURL(Hook), convey.ShouldEqual, "test-url/fake")
- })
-}
-
-func TestNewWebhookHelper(t *testing.T) {
- convey.Convey("Make sure webhook helper is not empty", t, func() {
- helper := NewWebhookHelper()
- convey.So(helper, convey.ShouldNotBeNil)
- })
-}
diff --git a/gateways/community/aws-sns/config_test.go b/gateways/community/aws-sns/config_test.go
deleted file mode 100644
index 8e6a810b60..0000000000
--- a/gateways/community/aws-sns/config_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sns
-
-import (
- "testing"
-
- "github.com/smartystreets/goconvey/convey"
-)
-
-var es = `
-hook:
- endpoint: "/test"
- port: "8080"
- url: "myurl/test"
-topicArn: "test-arn"
-region: "us-east-1"
-accessKey:
- key: accesskey
- name: sns
-secretKey:
- key: secretkey
- name: sns
-`
-
-var esWithoutCreds = `
-hook:
- endpoint: "/test"
- port: "8080"
- url: "myurl/test"
-topicArn: "test-arn"
-region: "us-east-1"
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a aws-sns event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*snsEventSource)
- convey.So(ok, convey.ShouldEqual, true)
- })
-
- convey.Convey("Given a aws-sns event source without credentials, parse it", t, func() {
- ps, err := parseEventSource(esWithoutCreds)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*snsEventSource)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/community/aws-sns/start.go b/gateways/community/aws-sns/start.go
deleted file mode 100644
index 5b15221901..0000000000
--- a/gateways/community/aws-sns/start.go
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sns
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/aws/aws-sdk-go/aws/session"
- snslib "github.com/aws/aws-sdk-go/service/sns"
- "github.com/ghodss/yaml"
-)
-
-var (
- helper = gwcommon.NewWebhookHelper()
-)
-
-func init() {
- go gwcommon.InitRouteChannels(helper)
-}
-
-// GetRoute returns the route
-func (rc *RouteConfig) GetRoute() *gwcommon.Route {
- return rc.Route
-}
-
-// RouteHandler handles new routes
-func (rc *RouteConfig) RouteHandler(writer http.ResponseWriter, request *http.Request) {
- r := rc.Route
-
- logger := r.Logger.WithFields(
- map[string]interface{}{
- common.LabelEventSource: r.EventSource.Name,
- common.LabelEndpoint: r.Webhook.Endpoint,
- common.LabelPort: r.Webhook.Port,
- common.LabelHTTPMethod: r.Webhook.Method,
- })
-
- logger.Info("request received")
-
- if !helper.ActiveEndpoints[r.Webhook.Endpoint].Active {
- logger.Info("endpoint is not active")
- common.SendErrorResponse(writer, "")
- return
- }
-
- body, err := ioutil.ReadAll(request.Body)
- if err != nil {
- logger.WithError(err).Error("failed to parse request body")
- common.SendErrorResponse(writer, "")
- return
- }
-
- var snspayload *httpNotification
- err = yaml.Unmarshal(body, &snspayload)
- if err != nil {
- logger.WithError(err).Error("failed to convert request payload into sns event source payload")
- common.SendErrorResponse(writer, "")
- return
- }
-
- switch snspayload.Type {
- case messageTypeSubscriptionConfirmation:
- awsSession := rc.session
- out, err := awsSession.ConfirmSubscription(&snslib.ConfirmSubscriptionInput{
- TopicArn: &rc.snses.TopicArn,
- Token: &snspayload.Token,
- })
- if err != nil {
- logger.WithError(err).Error("failed to send confirmation response to amazon")
- common.SendErrorResponse(writer, "")
- return
- }
- rc.subscriptionArn = out.SubscriptionArn
-
- case messageTypeNotification:
- helper.ActiveEndpoints[r.Webhook.Endpoint].DataCh <- body
- }
-
- logger.Info("request successfully processed")
-}
-
-// PostStart subscribes to the sns topic
-func (rc *RouteConfig) PostStart() error {
- r := rc.Route
-
- logger := r.Logger.WithFields(
- map[string]interface{}{
- common.LabelEventSource: r.EventSource.Name,
- common.LabelEndpoint: r.Webhook.Endpoint,
- common.LabelPort: r.Webhook.Port,
- common.LabelHTTPMethod: r.Webhook.Method,
- "topic-arn": rc.snses.TopicArn,
- })
-
- logger.Info("subscribing to sns topic")
-
- sc := rc.snses
- var awsSession *session.Session
-
- if sc.AccessKey == nil && sc.SecretKey == nil {
- awsSessionWithoutCreds, err := gwcommon.GetAWSSessionWithoutCreds(sc.Region)
- if err != nil {
- return fmt.Errorf("failed to create aws session. err: %+v", err)
- }
-
- awsSession = awsSessionWithoutCreds
- } else {
- creds, err := gwcommon.GetAWSCreds(rc.clientset, rc.namespace, sc.AccessKey, sc.SecretKey)
- if err != nil {
- return fmt.Errorf("failed to create aws session. err: %+v", err)
- }
-
- awsSessionWithCreds, err := gwcommon.GetAWSSession(creds, sc.Region)
- if err != nil {
- return fmt.Errorf("failed to create aws session. err: %+v", err)
- }
-
- awsSession = awsSessionWithCreds
- }
-
- rc.session = snslib.New(awsSession)
- formattedUrl := gwcommon.GenerateFormattedURL(sc.Hook)
- if _, err := rc.session.Subscribe(&snslib.SubscribeInput{
- Endpoint: &formattedUrl,
- Protocol: &snsProtocol,
- TopicArn: &sc.TopicArn,
- }); err != nil {
- return fmt.Errorf("failed to send subscribe request. err: %+v", err)
- }
-
- return nil
-}
-
-// PostStop unsubscribes from the sns topic
-func (rc *RouteConfig) PostStop() error {
- if _, err := rc.session.Unsubscribe(&snslib.UnsubscribeInput{
- SubscriptionArn: rc.subscriptionArn,
- }); err != nil {
- return fmt.Errorf("failed to unsubscribe. err: %+v", err)
- }
- return nil
-}
-
-// StartEventSource starts an SNS event source
-func (ese *SNSEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
- log.Info("operating on event source")
-
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
- sc := config.(*snsEventSource)
-
- return gwcommon.ProcessRoute(&RouteConfig{
- Route: &gwcommon.Route{
- Logger: ese.Log,
- EventSource: eventSource,
- StartCh: make(chan struct{}),
- Webhook: sc.Hook,
- },
- snses: sc,
- namespace: ese.Namespace,
- clientset: ese.Clientset,
- }, helper, eventStream)
-}
diff --git a/gateways/community/aws-sns/start_test.go b/gateways/community/aws-sns/start_test.go
deleted file mode 100644
index c6eb1b2bea..0000000000
--- a/gateways/community/aws-sns/start_test.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sns
-
-import (
- "bytes"
- "io/ioutil"
- "net/http"
- "testing"
-
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/aws/aws-sdk-go/aws/credentials"
- snslib "github.com/aws/aws-sdk-go/service/sns"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- "k8s.io/client-go/kubernetes/fake"
-)
-
-func TestAWSSNS(t *testing.T) {
- convey.Convey("Given an route configuration", t, func() {
- rc := &RouteConfig{
- Route: gwcommon.GetFakeRoute(),
- namespace: "fake",
- clientset: fake.NewSimpleClientset(),
- }
- r := rc.Route
-
- helper.ActiveEndpoints[r.Webhook.Endpoint] = &gwcommon.Endpoint{
- DataCh: make(chan []byte),
- }
- writer := &gwcommon.FakeHttpWriter{}
- subscriptionArn := "arn://fake"
- awsSession, err := gwcommon.GetAWSSession(credentials.NewStaticCredentialsFromCreds(credentials.Value{
- AccessKeyID: "access",
- SecretAccessKey: "secret",
- }), "mock-region")
-
- convey.So(err, convey.ShouldBeNil)
-
- snsSession := snslib.New(awsSession)
- rc.session = snsSession
- rc.subscriptionArn = &subscriptionArn
-
- convey.Convey("handle the inactive route", func() {
- rc.RouteHandler(writer, &http.Request{})
- convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest)
- })
-
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
-
- helper.ActiveEndpoints[r.Webhook.Endpoint].Active = true
- rc.snses = ps.(*snsEventSource)
-
- convey.Convey("handle the active route", func() {
- payload := httpNotification{
- TopicArn: "arn://fake",
- Token: "faketoken",
- Type: messageTypeSubscriptionConfirmation,
- }
-
- payloadBytes, err := yaml.Marshal(payload)
- convey.So(err, convey.ShouldBeNil)
- rc.RouteHandler(writer, &http.Request{
- Body: ioutil.NopCloser(bytes.NewBuffer(payloadBytes)),
- })
- convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest)
-
- dataCh := make(chan []byte)
-
- go func() {
- data := <-helper.ActiveEndpoints[r.Webhook.Endpoint].DataCh
- dataCh <- data
- }()
-
- payload.Type = messageTypeNotification
- payloadBytes, err = yaml.Marshal(payload)
- convey.So(err, convey.ShouldBeNil)
- rc.RouteHandler(writer, &http.Request{
- Body: ioutil.NopCloser(bytes.NewBuffer(payloadBytes)),
- })
- data := <-dataCh
- convey.So(data, convey.ShouldNotBeNil)
- })
-
- convey.Convey("Run post activate", func() {
- err := rc.PostStart()
- convey.So(err, convey.ShouldNotBeNil)
- })
-
- convey.Convey("Run post stop", func() {
- err = rc.PostStop()
- convey.So(err, convey.ShouldNotBeNil)
- })
-
- psWithoutCreds, err2 := parseEventSource(esWithoutCreds)
- convey.So(err2, convey.ShouldBeNil)
-
- rc.snses = psWithoutCreds.(*snsEventSource)
-
- convey.Convey("Run post activate on event source without credentials", func() {
- err := rc.PostStart()
- convey.So(err, convey.ShouldNotBeNil)
- })
- })
-}
diff --git a/gateways/community/aws-sns/validate.go b/gateways/community/aws-sns/validate.go
deleted file mode 100644
index 5e70c66e19..0000000000
--- a/gateways/community/aws-sns/validate.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sns
-
-import (
- "context"
- "fmt"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *SNSEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateSNSConfig)
-}
-
-func validateSNSConfig(config interface{}) error {
- sc := config.(*snsEventSource)
- if sc == nil {
- return gwcommon.ErrNilEventSource
- }
- if sc.TopicArn == "" {
- return fmt.Errorf("must specify topic arn")
- }
- if sc.Region == "" {
- return fmt.Errorf("must specify region")
- }
- return gwcommon.ValidateWebhook(sc.Hook)
-}
diff --git a/gateways/community/aws-sns/validate_test.go b/gateways/community/aws-sns/validate_test.go
deleted file mode 100644
index 4809bb2b6d..0000000000
--- a/gateways/community/aws-sns/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sns
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestSNSEventSourceExecutor_ValidateEventSource(t *testing.T) {
- convey.Convey("Given sns event source spec, parse it and make sure no error occurs", t, func() {
- ese := &SNSEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "aws-sns.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/community/aws-sqs/config.go b/gateways/community/aws-sqs/config.go
deleted file mode 100644
index 7d48ad6bc9..0000000000
--- a/gateways/community/aws-sqs/config.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sqs
-
-import (
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/client-go/kubernetes"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// SQSEventSourceExecutor implements Eventing
-type SQSEventSourceExecutor struct {
- Log *logrus.Logger
- // Clientset is kubernetes client
- Clientset kubernetes.Interface
- // Namespace where gateway is deployed
- Namespace string
-}
-
-// sqsEventSource contains information to listen to AWS SQS
-type sqsEventSource struct {
-	// AccessKey refers to the K8s secret containing the AWS access key
- AccessKey *corev1.SecretKeySelector `json:"accessKey"`
-
-	// SecretKey refers to the K8s secret containing the AWS secret key
- SecretKey *corev1.SecretKeySelector `json:"secretKey"`
-
- // Region is AWS region
- Region string `json:"region"`
-
- // Queue is AWS SQS queue to listen to for messages
- Queue string `json:"queue"`
-
-	// WaitTimeSeconds is the duration (in seconds) for which the call waits for a message to arrive
- // in the queue before returning.
- WaitTimeSeconds int64 `json:"waitTimeSeconds"`
-}
-
-func parseEventSource(es string) (interface{}, error) {
- var n *sqsEventSource
- err := yaml.Unmarshal([]byte(es), &n)
- if err != nil {
- return nil, err
- }
- return n, nil
-}
diff --git a/gateways/community/aws-sqs/config_test.go b/gateways/community/aws-sqs/config_test.go
deleted file mode 100644
index 99bac49c6a..0000000000
--- a/gateways/community/aws-sqs/config_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sqs
-
-import (
- "testing"
-
- "github.com/smartystreets/goconvey/convey"
-)
-
-var es = `
-region: "us-east-1"
-accessKey:
- key: accesskey
- name: sns
-secretKey:
- key: secretkey
- name: sns
-queue: "test-queue"
-waitTimeSeconds: 10
-`
-
-var esWithoutCreds = `
-region: "us-east-1"
-queue: "test-queue"
-waitTimeSeconds: 10
-`
-
-func TestParseConfig(t *testing.T) {
-	convey.Convey("Given an aws-sqs event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*sqsEventSource)
- convey.So(ok, convey.ShouldEqual, true)
- })
-
-	convey.Convey("Given an aws-sqs event source without AWS credentials, parse it", t, func() {
- ps, err := parseEventSource(esWithoutCreds)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*sqsEventSource)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/community/aws-sqs/start.go b/gateways/community/aws-sqs/start.go
deleted file mode 100644
index d2bb8015f9..0000000000
--- a/gateways/community/aws-sqs/start.go
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sqs
-
-import (
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
- sqslib "github.com/aws/aws-sdk-go/service/sqs"
-)
-
-// StartEventSource starts an event source
-func (ese *SQSEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
- log.Info("activating event source")
-
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
-
- go ese.listenEvents(config.(*sqsEventSource), eventSource, dataCh, errorCh, doneCh)
-
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, ese.Log)
-}
-
-// listenEvents long-polls the queue and fires an event for each message it processes.
-func (ese *SQSEventSourceExecutor) listenEvents(s *sqsEventSource, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- var awsSession *session.Session
-
- if s.AccessKey == nil && s.SecretKey == nil {
- awsSessionWithoutCreds, err := gwcommon.GetAWSSessionWithoutCreds(s.Region)
- if err != nil {
- errorCh <- err
- return
- }
-
- awsSession = awsSessionWithoutCreds
- } else {
- creds, err := gwcommon.GetAWSCreds(ese.Clientset, ese.Namespace, s.AccessKey, s.SecretKey)
- if err != nil {
- errorCh <- err
- return
- }
-
- awsSessionWithCreds, err := gwcommon.GetAWSSession(creds, s.Region)
- if err != nil {
- errorCh <- err
- return
- }
-
- awsSession = awsSessionWithCreds
- }
-
- sqsClient := sqslib.New(awsSession)
-
- queueURL, err := sqsClient.GetQueueUrl(&sqslib.GetQueueUrlInput{
- QueueName: &s.Queue,
- })
- if err != nil {
- errorCh <- err
- return
- }
-
- for {
- select {
- case <-doneCh:
- return
-
- default:
- msg, err := sqsClient.ReceiveMessage(&sqslib.ReceiveMessageInput{
- QueueUrl: queueURL.QueueUrl,
- MaxNumberOfMessages: aws.Int64(1),
- WaitTimeSeconds: aws.Int64(s.WaitTimeSeconds),
- })
- if err != nil {
- ese.Log.WithField(common.LabelEventSource, eventSource.Name).WithError(err).Error("failed to process item from queue, waiting for next timeout")
- continue
- }
-
- if msg != nil && len(msg.Messages) > 0 {
- dataCh <- []byte(*msg.Messages[0].Body)
-
- if _, err := sqsClient.DeleteMessage(&sqslib.DeleteMessageInput{
- QueueUrl: queueURL.QueueUrl,
- ReceiptHandle: msg.Messages[0].ReceiptHandle,
- }); err != nil {
- errorCh <- err
- return
- }
- }
- }
- }
-}
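For reference, the consumption pattern the removed SQS gateway relied on reduces to a long-poll receive/delete loop. Below is a minimal standalone sketch using the same aws-sdk-go calls as the deleted start.go; the region and queue name mirror the test fixture above, and credentials are assumed to come from the default AWS chain rather than a Kubernetes secret.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	sqslib "github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	// Session from the default credential chain; region value is illustrative.
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
	if err != nil {
		log.Fatal(err)
	}
	client := sqslib.New(sess)

	// Resolve the queue URL once, as the gateway did on startup.
	queueURL, err := client.GetQueueUrl(&sqslib.GetQueueUrlInput{
		QueueName: aws.String("test-queue"),
	})
	if err != nil {
		log.Fatal(err)
	}

	for {
		// Long poll: the call blocks for up to WaitTimeSeconds before returning.
		out, err := client.ReceiveMessage(&sqslib.ReceiveMessageInput{
			QueueUrl:            queueURL.QueueUrl,
			MaxNumberOfMessages: aws.Int64(1),
			WaitTimeSeconds:     aws.Int64(10),
		})
		if err != nil {
			log.Printf("receive failed, retrying: %v", err)
			continue
		}
		if len(out.Messages) == 0 {
			continue
		}
		fmt.Println(*out.Messages[0].Body)

		// Delete only after the message has been handed off, so an
		// unprocessed message becomes visible again after the timeout.
		if _, err := client.DeleteMessage(&sqslib.DeleteMessageInput{
			QueueUrl:      queueURL.QueueUrl,
			ReceiptHandle: out.Messages[0].ReceiptHandle,
		}); err != nil {
			log.Fatal(err)
		}
	}
}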
diff --git a/gateways/community/aws-sqs/start_test.go b/gateways/community/aws-sqs/start_test.go
deleted file mode 100644
index 9d62dfbfbb..0000000000
--- a/gateways/community/aws-sqs/start_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sqs
-
-import (
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/smartystreets/goconvey/convey"
- "k8s.io/client-go/kubernetes/fake"
-)
-
-func TestListenEvents(t *testing.T) {
- convey.Convey("Given an event source, listen to events", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
-
- ese := &SQSEventSourceExecutor{
- Clientset: fake.NewSimpleClientset(),
- Namespace: "fake",
- Log: common.NewArgoEventsLogger(),
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
- errorCh2 := make(chan error)
-
- go func() {
- err := <-errorCh
- errorCh2 <- err
- }()
-
- ese.listenEvents(ps.(*sqsEventSource), &gateways.EventSource{
- Name: "fake",
- Data: es,
- Id: "1234",
- }, dataCh, errorCh, doneCh)
-
- err = <-errorCh2
- convey.So(err, convey.ShouldNotBeNil)
- })
-
- convey.Convey("Given an event source without AWS credentials, listen to events", t, func() {
- ps, err := parseEventSource(esWithoutCreds)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
-
- ese := &SQSEventSourceExecutor{
- Clientset: fake.NewSimpleClientset(),
- Namespace: "fake",
- Log: common.NewArgoEventsLogger(),
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
- errorCh2 := make(chan error)
-
- go func() {
- err := <-errorCh
- errorCh2 <- err
- }()
-
- ese.listenEvents(ps.(*sqsEventSource), &gateways.EventSource{
- Name: "fake",
- Data: es,
- Id: "1234",
- }, dataCh, errorCh, doneCh)
-
- err = <-errorCh2
- convey.So(err, convey.ShouldNotBeNil)
- })
-}
diff --git a/gateways/community/aws-sqs/validate.go b/gateways/community/aws-sqs/validate.go
deleted file mode 100644
index 3a4f0ab6ed..0000000000
--- a/gateways/community/aws-sqs/validate.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sqs
-
-import (
- "context"
- "fmt"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *SQSEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateSQSConfig)
-}
-
-func validateSQSConfig(config interface{}) error {
- sc := config.(*sqsEventSource)
- if sc == nil {
- return gwcommon.ErrNilEventSource
- }
- if sc.WaitTimeSeconds == 0 {
- return fmt.Errorf("must specify polling timeout")
- }
- if sc.Region == "" {
- return fmt.Errorf("must specify region")
- }
- if sc.Queue == "" {
- return fmt.Errorf("must specify queue name")
- }
- return nil
-}
diff --git a/gateways/community/aws-sqs/validate_test.go b/gateways/community/aws-sqs/validate_test.go
deleted file mode 100644
index 2abeac0ea1..0000000000
--- a/gateways/community/aws-sqs/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aws_sqs
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestSQSEventSourceExecutor_ValidateEventSource(t *testing.T) {
- convey.Convey("Given a valid sqsEventSource event source spec, parse it and make sure no error occurs", t, func() {
- ese := &SQSEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "aws-sqs.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/community/gcp-pubsub/config.go b/gateways/community/gcp-pubsub/config.go
deleted file mode 100644
index ec24a06304..0000000000
--- a/gateways/community/gcp-pubsub/config.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package pubsub
-
-import (
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// GcpPubSubEventSourceExecutor implements Eventing
-type GcpPubSubEventSourceExecutor struct {
- Log *logrus.Logger
-}
-
-// pubSubEventSource contains configuration to subscribe to GCP PubSub topic
-type pubSubEventSource struct {
- // ProjectID is the unique identifier for your project on GCP
- ProjectID string `json:"projectID"`
- // TopicProjectID identifies the project where the topic should exist or be created
- // (assumed to be the same as ProjectID by default)
- TopicProjectID string `json:"topicProjectID"`
- // Topic on which a subscription will be created
- Topic string `json:"topic"`
- // CredentialsFile is the file that contains credentials to authenticate for GCP
- CredentialsFile string `json:"credentialsFile"`
-}
-
-func parseEventSource(es string) (interface{}, error) {
- var n *pubSubEventSource
- err := yaml.Unmarshal([]byte(es), &n)
- if err != nil {
- return nil, err
- }
- return n, nil
-}
diff --git a/gateways/community/gcp-pubsub/start.go b/gateways/community/gcp-pubsub/start.go
deleted file mode 100644
index dade593c8b..0000000000
--- a/gateways/community/gcp-pubsub/start.go
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package pubsub
-
-import (
- "context"
- "fmt"
-
- "cloud.google.com/go/pubsub"
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "google.golang.org/api/option"
-)
-
-// StartEventSource starts the GCP PubSub Gateway
-func (ese *GcpPubSubEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-	log.Info("operating on event source")
-
-	config, err := parseEventSource(eventSource.Data)
-	if err != nil {
-		log.WithError(err).Error("failed to parse event source")
- return err
- }
- sc := config.(*pubSubEventSource)
-
- ctx := eventStream.Context()
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
-
- go ese.listenEvents(ctx, sc, eventSource, dataCh, errorCh, doneCh)
-
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, ese.Log)
-}
-
-func (ese *GcpPubSubEventSourceExecutor) listenEvents(ctx context.Context, sc *pubSubEventSource, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- // Create a new topic with the given name if none exists
- logger := ese.Log.WithField(common.LabelEventSource, eventSource.Name).WithField("topic", sc.Topic)
-
- client, err := pubsub.NewClient(ctx, sc.ProjectID, option.WithCredentialsFile(sc.CredentialsFile))
- if err != nil {
- errorCh <- err
- return
- }
-
- topicClient := client // use same client for topic and subscription by default
- if sc.TopicProjectID != "" && sc.TopicProjectID != sc.ProjectID {
- topicClient, err = pubsub.NewClient(ctx, sc.TopicProjectID, option.WithCredentialsFile(sc.CredentialsFile))
- if err != nil {
- errorCh <- err
- return
- }
- }
-
- topic := topicClient.Topic(sc.Topic)
- exists, err := topic.Exists(ctx)
- if err != nil {
- errorCh <- err
- return
- }
- if !exists {
- logger.Info("Creating GCP PubSub topic")
- if _, err := topicClient.CreateTopic(ctx, sc.Topic); err != nil {
- errorCh <- err
- return
- }
- }
-
- logger.Info("Subscribing to GCP PubSub topic")
- subscription_name := fmt.Sprintf("%s-%s", eventSource.Name, eventSource.Id)
- subscription := client.Subscription(subscription_name)
- exists, err = subscription.Exists(ctx)
-
- if err != nil {
- errorCh <- err
- return
- }
- if exists {
- logger.Warn("Using an existing subscription")
- } else {
- logger.Info("Creating subscription")
- if _, err := client.CreateSubscription(ctx, subscription_name, pubsub.SubscriptionConfig{Topic: topic}); err != nil {
- errorCh <- err
- return
- }
- }
-
- err = subscription.Receive(ctx, func(msgCtx context.Context, m *pubsub.Message) {
- logger.Info("received GCP PubSub Message from topic")
- dataCh <- m.Data
- m.Ack()
- })
- if err != nil {
- errorCh <- err
- return
- }
-
- <-doneCh
-
- // after this point, panic on errors
- logger.Info("deleting GCP PubSub subscription")
- if err = subscription.Delete(context.Background()); err != nil {
- panic(err)
- }
-
- logger.Info("closing GCP PubSub client")
- if err = client.Close(); err != nil {
- panic(err)
- }
-}
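The removed GCP PubSub listener follows a create-if-missing flow for the topic and subscription before blocking in Receive. A minimal sketch of that flow with cloud.google.com/go/pubsub follows; the project ID, topic name, subscription name, and credentials path are placeholders, not values from the original configuration.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/pubsub"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Placeholder project and credentials file.
	client, err := pubsub.NewClient(ctx, "my-project", option.WithCredentialsFile("/etc/creds/key.json"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Create the topic only if it does not already exist.
	topic := client.Topic("example-topic")
	exists, err := topic.Exists(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if !exists {
		if topic, err = client.CreateTopic(ctx, "example-topic"); err != nil {
			log.Fatal(err)
		}
	}

	// One subscription per consumer; reuse it if it already exists.
	sub := client.Subscription("example-sub")
	if exists, err = sub.Exists(ctx); err != nil {
		log.Fatal(err)
	}
	if !exists {
		if sub, err = client.CreateSubscription(ctx, "example-sub", pubsub.SubscriptionConfig{Topic: topic}); err != nil {
			log.Fatal(err)
		}
	}

	// Receive blocks until ctx is cancelled or an unrecoverable error occurs;
	// each message is acked after it has been handled.
	err = sub.Receive(ctx, func(_ context.Context, m *pubsub.Message) {
		fmt.Println(string(m.Data))
		m.Ack()
	})
	if err != nil {
		log.Fatal(err)
	}
}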
diff --git a/gateways/community/gcp-pubsub/start_test.go b/gateways/community/gcp-pubsub/start_test.go
deleted file mode 100644
index 1781f5c3e8..0000000000
--- a/gateways/community/gcp-pubsub/start_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package pubsub
-
-import (
- "context"
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-func TestListenEvents(t *testing.T) {
- convey.Convey("Given a pubsub event source, listen to events", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- psc := ps.(*pubSubEventSource)
-
- ese := &GcpPubSubEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
- errCh2 := make(chan error)
-
- go func() {
- err := <-errorCh
- errCh2 <- err
- }()
-
- ese.listenEvents(context.Background(), psc, &gateways.EventSource{
- Name: "fake",
- Data: es,
- Id: "1234",
- }, dataCh, errorCh, doneCh)
-
- err = <-errCh2
- convey.So(err, convey.ShouldNotBeNil)
- })
-}
diff --git a/gateways/community/gcp-pubsub/validate.go b/gateways/community/gcp-pubsub/validate.go
deleted file mode 100644
index b8b57760d1..0000000000
--- a/gateways/community/gcp-pubsub/validate.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package pubsub
-
-import (
- "context"
- "fmt"
- "os"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *GcpPubSubEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validatePubSubConfig)
-}
-
-func validatePubSubConfig(config interface{}) error {
- sc := config.(*pubSubEventSource)
- if sc == nil {
- return gwcommon.ErrNilEventSource
- }
- if sc.ProjectID == "" {
- return fmt.Errorf("must specify projectId")
- }
- if sc.Topic == "" {
- return fmt.Errorf("must specify topic")
- }
- if sc.CredentialsFile != "" {
- if _, err := os.Stat(sc.CredentialsFile); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/gateways/community/gcp-pubsub/validate_test.go b/gateways/community/gcp-pubsub/validate_test.go
deleted file mode 100644
index 707b77752d..0000000000
--- a/gateways/community/gcp-pubsub/validate_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package pubsub
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestGcpPubSubEventSourceExecutor_ValidateEventSource(t *testing.T) {
- convey.Convey("Given a valid gcp pub-sub event source spec, parse it and make sure no error occurs", t, func() {
- ese := &GcpPubSubEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "gcp-pubsub.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.Println(valid.Reason)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/community/github/config.go b/gateways/community/github/config.go
deleted file mode 100644
index 703a4da885..0000000000
--- a/gateways/community/github/config.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright 2018 KompiTech GmbH
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package github
-
-import (
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/google/go-github/github"
- "github.com/sirupsen/logrus"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/client-go/kubernetes"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// GithubEventSourceExecutor implements ConfigExecutor
-type GithubEventSourceExecutor struct {
- Log *logrus.Logger
- // Clientset is kubernetes client
- Clientset kubernetes.Interface
- // Namespace where gateway is deployed
- Namespace string
-}
-
-// RouteConfig contains information about the route
-type RouteConfig struct {
- route *gwcommon.Route
- ges *githubEventSource
- client *github.Client
- hook *github.Hook
- clientset kubernetes.Interface
- namespace string
-}
-
-// githubEventSource contains information to setup a github project integration
-type githubEventSource struct {
- // Webhook ID
- Id int64 `json:"id"`
- // Webhook
- Hook *gwcommon.Webhook `json:"hook"`
- // GitHub owner name i.e. argoproj
- Owner string `json:"owner"`
- // GitHub repo name i.e. argo-events
- Repository string `json:"repository"`
-	// GitHub events which the gateway will subscribe to
- Events []string `json:"events"`
- // K8s secret containing github api token
- APIToken *corev1.SecretKeySelector `json:"apiToken"`
- // K8s secret containing WebHook Secret
- WebHookSecret *corev1.SecretKeySelector `json:"webHookSecret"`
- // Insecure tls verification
- Insecure bool `json:"insecure"`
- // Active
- Active bool `json:"active"`
- // ContentType json or form
- ContentType string `json:"contentType"`
- // GitHub base URL (for GitHub Enterprise)
- GithubBaseURL string `json:"githubBaseURL"`
- // GitHub upload URL (for GitHub Enterprise)
- GithubUploadURL string `json:"githubUploadURL"`
-}
-
-// cred stores the api access token or webhook secret
-type cred struct {
- secret string
-}
-
-// parseEventSource parses the gateway's event source configuration
-func parseEventSource(config string) (interface{}, error) {
- var g *githubEventSource
- err := yaml.Unmarshal([]byte(config), &g)
- if err != nil {
- return nil, err
- }
- return g, err
-}
diff --git a/gateways/community/github/config_test.go b/gateways/community/github/config_test.go
deleted file mode 100644
index 08bb2e6c1f..0000000000
--- a/gateways/community/github/config_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2018 KompiTech GmbH
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package github
-
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-var es = `
-id: 1234
-hook:
- endpoint: "/push"
- port: "12000"
- url: "http://webhook-gateway-svc"
-owner: "asd"
-repository: "dsa"
-events:
-- PushEvents
-apiToken:
- key: accesskey
- name: githab-access
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a github event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*githubEventSource)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/community/github/start.go b/gateways/community/github/start.go
deleted file mode 100644
index b31f3e0c52..0000000000
--- a/gateways/community/github/start.go
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
-Copyright 2018 KompiTech GmbH
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package github
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "time"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/argoproj/argo-events/store"
- gh "github.com/google/go-github/github"
- corev1 "k8s.io/api/core/v1"
-)
-
-const (
- githubEventHeader = "X-GitHub-Event"
- githubDeliveryHeader = "X-GitHub-Delivery"
-)
-
-var (
- helper = gwcommon.NewWebhookHelper()
-)
-
-func init() {
- go gwcommon.InitRouteChannels(helper)
-}
-
-// getCredentials for github
-func (rc *RouteConfig) getCredentials(gs *corev1.SecretKeySelector) (*cred, error) {
- token, err := store.GetSecrets(rc.clientset, rc.namespace, gs.Name, gs.Key)
- if err != nil {
- return nil, err
- }
- return &cred{
- secret: token,
- }, nil
-}
-
-func (rc *RouteConfig) GetRoute() *gwcommon.Route {
- return rc.route
-}
-
-func (rc *RouteConfig) PostStart() error {
- gc := rc.ges
-
- c, err := rc.getCredentials(gc.APIToken)
- if err != nil {
-		return fmt.Errorf("failed to retrieve github credentials. err: %+v", err)
- }
-
- PATTransport := TokenAuthTransport{
- Token: c.secret,
- }
-
- formattedUrl := gwcommon.GenerateFormattedURL(gc.Hook)
- hookConfig := map[string]interface{}{
- "url": &formattedUrl,
- }
-
- if gc.ContentType != "" {
- hookConfig["content_type"] = gc.ContentType
- }
-
- if gc.Insecure {
- hookConfig["insecure_ssl"] = "1"
- } else {
- hookConfig["insecure_ssl"] = "0"
- }
-
- if gc.WebHookSecret != nil {
- sc, err := rc.getCredentials(gc.WebHookSecret)
- if err != nil {
- return fmt.Errorf("failed to retrieve webhook secret. err: %+v", err)
- }
- hookConfig["secret"] = sc.secret
- }
-
- rc.hook = &gh.Hook{
- Events: gc.Events,
- Active: gh.Bool(gc.Active),
- Config: hookConfig,
- }
-
- rc.client = gh.NewClient(PATTransport.Client())
- if gc.GithubBaseURL != "" {
- baseURL, err := url.Parse(gc.GithubBaseURL)
- if err != nil {
- return fmt.Errorf("failed to parse github base url. err: %s", err)
- }
- rc.client.BaseURL = baseURL
- }
- if gc.GithubUploadURL != "" {
- uploadURL, err := url.Parse(gc.GithubUploadURL)
- if err != nil {
- return fmt.Errorf("failed to parse github upload url. err: %s", err)
- }
- rc.client.UploadURL = uploadURL
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- defer cancel()
- hook, _, err := rc.client.Repositories.CreateHook(ctx, gc.Owner, gc.Repository, rc.hook)
- if err != nil {
- // Continue if error is because hook already exists
- er, ok := err.(*gh.ErrorResponse)
- if !ok || er.Response.StatusCode != http.StatusUnprocessableEntity {
- return fmt.Errorf("failed to create webhook. err: %+v", err)
- }
- }
-
- if hook == nil {
- ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
- defer cancel()
- hooks, _, err := rc.client.Repositories.ListHooks(ctx, gc.Owner, gc.Repository, nil)
- if err != nil {
- return fmt.Errorf("failed to list existing webhooks. err: %+v", err)
- }
-
- hook = getHook(hooks, formattedUrl, gc.Events)
- if hook == nil {
- return fmt.Errorf("failed to find existing webhook.")
- }
- }
-
- if gc.WebHookSecret != nil {
- // As secret in hook config is masked with asterisk (*), replace it with unmasked secret.
- hook.Config["secret"] = hookConfig["secret"]
- }
-
- rc.hook = hook
- rc.route.Logger.WithField(common.LabelEventSource, rc.route.EventSource.Name).Info("github hook created")
- return nil
-}
-
-// PostStop runs after event source is stopped
-func (rc *RouteConfig) PostStop() error {
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- defer cancel()
- if _, err := rc.client.Repositories.DeleteHook(ctx, rc.ges.Owner, rc.ges.Repository, *rc.hook.ID); err != nil {
- return fmt.Errorf("failed to delete hook. err: %+v", err)
- }
- rc.route.Logger.WithField(common.LabelEventSource, rc.route.EventSource.Name).Info("github hook deleted")
- return nil
-}
-
-// StartEventSource starts an event source
-func (ese *GithubEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
- log.Info("operating on event source")
-
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
- gc := config.(*githubEventSource)
-
- return gwcommon.ProcessRoute(&RouteConfig{
- route: &gwcommon.Route{
- Logger: ese.Log,
- EventSource: eventSource,
- Webhook: gc.Hook,
- StartCh: make(chan struct{}),
- },
- clientset: ese.Clientset,
- namespace: ese.Namespace,
- ges: gc,
- }, helper, eventStream)
-}
-
-func parseValidateRequest(r *http.Request, secret []byte) ([]byte, error) {
- body, err := gh.ValidatePayload(r, secret)
- if err != nil {
- return nil, err
- }
-
- payload := make(map[string]interface{})
- if err := json.Unmarshal(body, &payload); err != nil {
- return nil, err
- }
- for _, h := range []string{
- githubEventHeader,
- githubDeliveryHeader,
- } {
- payload[h] = r.Header.Get(h)
- }
- return json.Marshal(payload)
-}
-
-// routeActiveHandler handles new route
-func (rc *RouteConfig) RouteHandler(writer http.ResponseWriter, request *http.Request) {
- r := rc.route
-
- logger := r.Logger.WithFields(
- map[string]interface{}{
- common.LabelEventSource: r.EventSource.Name,
- common.LabelEndpoint: r.Webhook.Endpoint,
- common.LabelPort: r.Webhook.Port,
- })
-
- logger.Info("request received")
-
- if !helper.ActiveEndpoints[r.Webhook.Endpoint].Active {
- logger.Info("endpoint is not active")
- common.SendErrorResponse(writer, "")
- return
- }
-
- hook := rc.hook
- secret := ""
- if s, ok := hook.Config["secret"]; ok {
- secret = s.(string)
- }
- body, err := parseValidateRequest(request, []byte(secret))
- if err != nil {
- logger.WithError(err).Error("request is not valid event notification")
- common.SendErrorResponse(writer, "")
- return
- }
-
- helper.ActiveEndpoints[r.Webhook.Endpoint].DataCh <- body
- logger.Info("request successfully processed")
- common.SendSuccessResponse(writer, "")
-}
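The removed GitHub route handler hinges on gh.ValidatePayload, which verifies the webhook signature against the shared secret before the payload is enriched with the event and delivery headers. A minimal standalone handler sketching just that validation step follows; the endpoint, port, and secret value are placeholders (the gateway read the secret from a Kubernetes secret instead).

package main

import (
	"encoding/json"
	"log"
	"net/http"

	gh "github.com/google/go-github/github"
)

// webhookSecret is a placeholder shared secret.
var webhookSecret = []byte("change-me")

func handler(w http.ResponseWriter, r *http.Request) {
	// ValidatePayload checks the signature header against the shared secret
	// and returns the raw body only if the signature matches.
	body, err := gh.ValidatePayload(r, webhookSecret)
	if err != nil {
		http.Error(w, "invalid signature", http.StatusBadRequest)
		return
	}

	// Enrich the payload with the delivery headers, as the gateway did, so a
	// downstream consumer can tell which event type arrived.
	payload := map[string]interface{}{}
	if err := json.Unmarshal(body, &payload); err != nil {
		http.Error(w, "malformed payload", http.StatusBadRequest)
		return
	}
	payload["X-GitHub-Event"] = r.Header.Get("X-GitHub-Event")
	payload["X-GitHub-Delivery"] = r.Header.Get("X-GitHub-Delivery")

	log.Printf("received %v delivery %v", payload["X-GitHub-Event"], payload["X-GitHub-Delivery"])
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/push", handler)
	log.Fatal(http.ListenAndServe(":12000", nil))
}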
diff --git a/gateways/community/github/validate.go b/gateways/community/github/validate.go
deleted file mode 100644
index f6f87aa575..0000000000
--- a/gateways/community/github/validate.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-Copyright 2018 KompiTech GmbH
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package github
-
-import (
- "context"
- "fmt"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// Validate validates github gateway configuration
-func (ese *GithubEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateGithub)
-}
-
-func validateGithub(config interface{}) error {
- g := config.(*githubEventSource)
- if g == nil {
- return gwcommon.ErrNilEventSource
- }
- if g.Repository == "" {
- return fmt.Errorf("repository cannot be empty")
- }
- if g.Owner == "" {
- return fmt.Errorf("owner cannot be empty")
- }
- if g.APIToken == nil {
- return fmt.Errorf("api token can't be empty")
- }
- if g.Events == nil || len(g.Events) < 1 {
- return fmt.Errorf("events must be defined")
- }
- if g.ContentType != "" {
- if !(g.ContentType == "json" || g.ContentType == "form") {
- return fmt.Errorf("content type must be \"json\" or \"form\"")
- }
- }
- return gwcommon.ValidateWebhook(g.Hook)
-}
diff --git a/gateways/community/github/validate_test.go b/gateways/community/github/validate_test.go
deleted file mode 100644
index 5059b9c9e8..0000000000
--- a/gateways/community/github/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package github
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateGithubEventSource(t *testing.T) {
- convey.Convey("Given github event source spec, parse it and make sure no error occurs", t, func() {
- ese := &GithubEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "github.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/community/gitlab/cmd/main.go b/gateways/community/gitlab/cmd/main.go
deleted file mode 100644
index b7b372d5c9..0000000000
--- a/gateways/community/gitlab/cmd/main.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
- "os"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/community/gitlab"
- "k8s.io/client-go/kubernetes"
-)
-
-func main() {
- kubeConfig, _ := os.LookupEnv(common.EnvVarKubeConfig)
- restConfig, err := common.GetClientConfig(kubeConfig)
- if err != nil {
- panic(err)
- }
- clientset := kubernetes.NewForConfigOrDie(restConfig)
- namespace, ok := os.LookupEnv(common.EnvVarGatewayNamespace)
- if !ok {
- panic("namespace is not provided")
- }
- gateways.StartGateway(&gitlab.GitlabEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- Namespace: namespace,
- Clientset: clientset,
- })
-}
diff --git a/gateways/community/gitlab/config.go b/gateways/community/gitlab/config.go
deleted file mode 100644
index 1184f1fcc7..0000000000
--- a/gateways/community/gitlab/config.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gitlab
-
-import (
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
- "github.com/xanzy/go-gitlab"
- "k8s.io/client-go/kubernetes"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// GitlabEventSourceExecutor implements ConfigExecutor
-type GitlabEventSourceExecutor struct {
- Log *logrus.Logger
- // Clientset is kubernetes client
- Clientset kubernetes.Interface
- // Namespace where gateway is deployed
- Namespace string
-}
-
-// RouteConfig contains the configuration information for a route
-type RouteConfig struct {
- route *gwcommon.Route
- clientset kubernetes.Interface
- client *gitlab.Client
- hook *gitlab.ProjectHook
- namespace string
- ges *gitlabEventSource
-}
-
-// gitlabEventSource contains information to setup a gitlab project integration
-type gitlabEventSource struct {
- // Webhook
- Hook *gwcommon.Webhook `json:"hook"`
-	// ProjectId is the ID of the project for which the integration needs to be set up
- ProjectId string `json:"projectId"`
- // Event is a gitlab event to listen to.
- // Refer https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794.
- Event string `json:"event"`
-	// AccessToken is a reference to the K8s secret that holds the gitlab api access information
- AccessToken *GitlabSecret `json:"accessToken"`
- // EnableSSLVerification to enable ssl verification
- EnableSSLVerification bool `json:"enableSSLVerification"`
- // GitlabBaseURL is the base URL for API requests to a custom endpoint
- GitlabBaseURL string `json:"gitlabBaseUrl"`
-}
-
-// GitlabSecret contains information about the K8s secret that holds the gitlab api access information
-type GitlabSecret struct {
- // Key within the K8 secret for access token
- Key string
- // Name of K8 secret containing access token info
- Name string
-}
-
-// cred stores the api access token
-type cred struct {
- // token is gitlab api access token
- token string
-}
-
-// parseEventSource parses an event source of the gateway
-func parseEventSource(config string) (interface{}, error) {
- var g *gitlabEventSource
- err := yaml.Unmarshal([]byte(config), &g)
- if err != nil {
- return nil, err
- }
- return g, err
-}
diff --git a/gateways/community/gitlab/config_test.go b/gateways/community/gitlab/config_test.go
deleted file mode 100644
index 8496bda0bd..0000000000
--- a/gateways/community/gitlab/config_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gitlab
-
-import (
- "testing"
-
- "github.com/smartystreets/goconvey/convey"
-)
-
-var es = `
-id: 12
-hook:
- endpoint: "/push"
- port: "12000"
- url: "http://webhook-gateway-gateway-svc/push"
-projectId: "28"
-event: "PushEvents"
-accessToken:
- key: accesskey
- name: gitlab-access
-enableSSLVerification: false
-gitlabBaseUrl: "http://gitlab.com/"
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a gitlab event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*gitlabEventSource)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/community/gitlab/start.go b/gateways/community/gitlab/start.go
deleted file mode 100644
index 4020ffc535..0000000000
--- a/gateways/community/gitlab/start.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gitlab
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "reflect"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/argoproj/argo-events/store"
- "github.com/xanzy/go-gitlab"
-)
-
-var (
- helper = gwcommon.NewWebhookHelper()
-)
-
-func init() {
- go gwcommon.InitRouteChannels(helper)
-}
-
-// getCredentials for gitlab
-func (rc *RouteConfig) getCredentials(gs *GitlabSecret) (*cred, error) {
- token, err := store.GetSecrets(rc.clientset, rc.namespace, gs.Name, gs.Key)
- if err != nil {
- return nil, err
- }
- return &cred{
- token: token,
- }, nil
-}
-
-func (rc *RouteConfig) GetRoute() *gwcommon.Route {
- return rc.route
-}
-
-func (rc *RouteConfig) PostStart() error {
- c, err := rc.getCredentials(rc.ges.AccessToken)
- if err != nil {
- return fmt.Errorf("failed to get gitlab credentials. err: %+v", err)
- }
-
- rc.client = gitlab.NewClient(nil, c.token)
- if err = rc.client.SetBaseURL(rc.ges.GitlabBaseURL); err != nil {
- return fmt.Errorf("failed to set gitlab base url, err: %+v", err)
- }
-
- formattedUrl := gwcommon.GenerateFormattedURL(rc.ges.Hook)
-
- opt := &gitlab.AddProjectHookOptions{
- URL: &formattedUrl,
- Token: &c.token,
- EnableSSLVerification: &rc.ges.EnableSSLVerification,
- }
-
- elem := reflect.ValueOf(opt).Elem().FieldByName(string(rc.ges.Event))
- if ok := elem.IsValid(); !ok {
- return fmt.Errorf("unknown event %s", rc.ges.Event)
- }
-
- iev := reflect.New(elem.Type().Elem())
- reflect.Indirect(iev).SetBool(true)
- elem.Set(iev)
-
- hook, _, err := rc.client.Projects.AddProjectHook(rc.ges.ProjectId, opt)
- if err != nil {
- return fmt.Errorf("failed to add project hook. err: %+v", err)
- }
-
- rc.hook = hook
- rc.route.Logger.WithField(common.LabelEventSource, rc.route.EventSource.Name).Info("gitlab hook created")
- return nil
-}
-
-func (rc *RouteConfig) PostStop() error {
- if _, err := rc.client.Projects.DeleteProjectHook(rc.ges.ProjectId, rc.hook.ID); err != nil {
- return fmt.Errorf("failed to delete hook. err: %+v", err)
- }
- rc.route.Logger.WithField(common.LabelEventSource, rc.route.EventSource.Name).Info("gitlab hook deleted")
- return nil
-}
-
-// routeActiveHandler handles new route
-func (rc *RouteConfig) RouteHandler(writer http.ResponseWriter, request *http.Request) {
- r := rc.route
-
- log := r.Logger.WithFields(
- map[string]interface{}{
- common.LabelEventSource: r.EventSource.Name,
- common.LabelEndpoint: r.Webhook.Endpoint,
- common.LabelPort: r.Webhook.Port,
- })
-
- log.Info("request received")
-
- if !helper.ActiveEndpoints[r.Webhook.Endpoint].Active {
- log.Info("endpoint is not active")
- common.SendErrorResponse(writer, "")
- return
- }
-
- body, err := ioutil.ReadAll(request.Body)
- if err != nil {
- log.WithError(err).Error("failed to parse request body")
- common.SendErrorResponse(writer, "")
- return
- }
-
- helper.ActiveEndpoints[r.Webhook.Endpoint].DataCh <- body
- log.Info("request successfully processed")
- common.SendSuccessResponse(writer, "")
-}
-
-// StartEventSource starts an event source
-func (ese *GitlabEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- log.Info("operating on event source")
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
- gl := config.(*gitlabEventSource)
-
- return gwcommon.ProcessRoute(&RouteConfig{
- route: &gwcommon.Route{
- EventSource: eventSource,
- Logger: ese.Log,
- Webhook: gl.Hook,
- StartCh: make(chan struct{}),
- },
- namespace: ese.Namespace,
- clientset: ese.Clientset,
- ges: gl,
- }, helper, eventStream)
-}
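The non-obvious part of the removed GitLab PostStart is the reflection step that maps the free-form event name from the YAML config (e.g. "PushEvents") onto the typed *bool fields of gitlab.AddProjectHookOptions. A minimal sketch of just that step follows, assuming the named field is one of the *bool event flags, as the original code did.

package main

import (
	"fmt"
	"reflect"

	"github.com/xanzy/go-gitlab"
)

// enableEvent flips the named *bool event field (e.g. "PushEvents") on the
// hook options by name, so a string from configuration can select which
// GitLab event the webhook subscribes to.
func enableEvent(opt *gitlab.AddProjectHookOptions, event string) error {
	field := reflect.ValueOf(opt).Elem().FieldByName(event)
	if !field.IsValid() {
		return fmt.Errorf("unknown event %s", event)
	}
	// The event fields are *bool: allocate a new bool, set it to true,
	// and point the struct field at it.
	val := reflect.New(field.Type().Elem())
	reflect.Indirect(val).SetBool(true)
	field.Set(val)
	return nil
}

func main() {
	opt := &gitlab.AddProjectHookOptions{}
	if err := enableEvent(opt, "PushEvents"); err != nil {
		panic(err)
	}
	fmt.Println(*opt.PushEvents) // prints: true
}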
diff --git a/gateways/community/gitlab/start_test.go b/gateways/community/gitlab/start_test.go
deleted file mode 100644
index 4c27de9979..0000000000
--- a/gateways/community/gitlab/start_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gitlab
-
-import (
- "bytes"
- "github.com/xanzy/go-gitlab"
- "io/ioutil"
- "net/http"
- "testing"
-
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/kubernetes/fake"
-)
-
-var (
- rc = &RouteConfig{
- route: gwcommon.GetFakeRoute(),
- clientset: fake.NewSimpleClientset(),
- namespace: "fake",
- }
-
- secretName = "gitlab-access"
- accessKey = "YWNjZXNz"
- LabelAccessKey = "accesskey"
-)
-
-func TestGetCredentials(t *testing.T) {
- convey.Convey("Given a kubernetes secret, get credentials", t, func() {
- secret, err := rc.clientset.CoreV1().Secrets(rc.namespace).Create(&corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: secretName,
- Namespace: rc.namespace,
- },
- Data: map[string][]byte{
- LabelAccessKey: []byte(accessKey),
- },
- })
- convey.So(err, convey.ShouldBeNil)
- convey.So(secret, convey.ShouldNotBeNil)
-
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- creds, err := rc.getCredentials(ps.(*gitlabEventSource).AccessToken)
- convey.So(err, convey.ShouldBeNil)
- convey.So(creds, convey.ShouldNotBeNil)
- convey.So(creds.token, convey.ShouldEqual, "YWNjZXNz")
- })
-}
-
-func TestRouteActiveHandler(t *testing.T) {
- convey.Convey("Given a route configuration", t, func() {
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint] = &gwcommon.Endpoint{
- DataCh: make(chan []byte),
- }
-
- convey.Convey("Inactive route should return error", func() {
- writer := &gwcommon.FakeHttpWriter{}
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- pbytes, err := yaml.Marshal(ps.(*gitlabEventSource))
- convey.So(err, convey.ShouldBeNil)
- rc.RouteHandler(writer, &http.Request{
- Body: ioutil.NopCloser(bytes.NewReader(pbytes)),
- })
- convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest)
-
- convey.Convey("Active route should return success", func() {
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint].Active = true
- rc.hook = &gitlab.ProjectHook{
- URL: "fake",
- PushEvents: true,
- }
- dataCh := make(chan []byte)
- go func() {
- resp := <-helper.ActiveEndpoints[rc.route.Webhook.Endpoint].DataCh
- dataCh <- resp
- }()
-
- rc.RouteHandler(writer, &http.Request{
- Body: ioutil.NopCloser(bytes.NewReader(pbytes)),
- })
-
- data := <-dataCh
- convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusOK)
- convey.So(string(data), convey.ShouldEqual, string(pbytes))
- rc.ges = ps.(*gitlabEventSource)
- err = rc.PostStart()
- convey.So(err, convey.ShouldNotBeNil)
- })
- })
- })
-}
diff --git a/gateways/community/gitlab/validate.go b/gateways/community/gitlab/validate.go
deleted file mode 100644
index b03eaf2dc8..0000000000
--- a/gateways/community/gitlab/validate.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gitlab
-
-import (
- "context"
- "fmt"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gitlab gateway event source
-func (ese *GitlabEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateGitlab)
-}
-
-func validateGitlab(config interface{}) error {
- g := config.(*gitlabEventSource)
- if g == nil {
- return gwcommon.ErrNilEventSource
- }
- if g.ProjectId == "" {
- return fmt.Errorf("project id can't be empty")
- }
- if g.Event == "" {
- return fmt.Errorf("event type can't be empty")
- }
- if g.GitlabBaseURL == "" {
- return fmt.Errorf("gitlab base url can't be empty")
- }
- if g.AccessToken == nil {
- return fmt.Errorf("access token can't be nil")
- }
- return gwcommon.ValidateWebhook(g.Hook)
-}
diff --git a/gateways/community/gitlab/validate_test.go b/gateways/community/gitlab/validate_test.go
deleted file mode 100644
index c8ad5c89cf..0000000000
--- a/gateways/community/gitlab/validate_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gitlab
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateGitlabEventSource(t *testing.T) {
- convey.Convey("Given a gitlab event source spec, parse it and make sure no error occurs", t, func() {
- ese := &GitlabEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "gitlab.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.Println(valid.Reason)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/community/hdfs/cmd/main.go b/gateways/community/hdfs/cmd/main.go
deleted file mode 100644
index d84824e0a7..0000000000
--- a/gateways/community/hdfs/cmd/main.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package main
-
-import (
- "os"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/community/hdfs"
- "k8s.io/client-go/kubernetes"
-)
-
-func main() {
- kubeConfig, _ := os.LookupEnv(common.EnvVarKubeConfig)
- restConfig, err := common.GetClientConfig(kubeConfig)
- if err != nil {
- panic(err)
- }
- clientset := kubernetes.NewForConfigOrDie(restConfig)
- namespace, ok := os.LookupEnv(common.EnvVarGatewayNamespace)
- if !ok {
- panic("namespace is not provided")
- }
- gateways.StartGateway(&hdfs.EventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- Namespace: namespace,
- Clientset: clientset,
- })
-}
diff --git a/gateways/community/hdfs/config.go b/gateways/community/hdfs/config.go
deleted file mode 100644
index de93b8daf1..0000000000
--- a/gateways/community/hdfs/config.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package hdfs
-
-import (
- "errors"
- "github.com/sirupsen/logrus"
-
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/client-go/kubernetes"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// EventSourceExecutor implements Eventing
-type EventSourceExecutor struct {
- Log *logrus.Logger
- // Clientset is kubernetes client
- Clientset kubernetes.Interface
- // Namespace where gateway is deployed
- Namespace string
-}
-
-// GatewayConfig contains information to setup a HDFS integration
-type GatewayConfig struct {
- gwcommon.WatchPathConfig `json:",inline"`
-
- // Type of file operations to watch
- Type string `json:"type"`
- // CheckInterval is a string that describes an interval duration to check the directory state, e.g. 1s, 30m, 2h... (defaults to 1m)
- CheckInterval string `json:"checkInterval,omitempty"`
-
- GatewayClientConfig `json:",inline"`
-}
-
-// GatewayClientConfig contains HDFS client configurations
-type GatewayClientConfig struct {
-	// Addresses is the list of accessible addresses of the HDFS name nodes
- Addresses []string `json:"addresses"`
-
- // HDFSUser is the user to access HDFS file system.
- // It is ignored if either ccache or keytab is used.
- HDFSUser string `json:"hdfsUser,omitempty"`
-
- // KrbCCacheSecret is the secret selector for Kerberos ccache
- // Either ccache or keytab can be set to use Kerberos.
- KrbCCacheSecret *corev1.SecretKeySelector `json:"krbCCacheSecret,omitempty"`
-
- // KrbKeytabSecret is the secret selector for Kerberos keytab
- // Either ccache or keytab can be set to use Kerberos.
- KrbKeytabSecret *corev1.SecretKeySelector `json:"krbKeytabSecret,omitempty"`
-
- // KrbUsername is the Kerberos username used with Kerberos keytab
- // It must be set if keytab is used.
- KrbUsername string `json:"krbUsername,omitempty"`
-
- // KrbRealm is the Kerberos realm used with Kerberos keytab
- // It must be set if keytab is used.
- KrbRealm string `json:"krbRealm,omitempty"`
-
-	// KrbConfigConfigMap is the configmap selector for the Kerberos config as a string
- // It must be set if either ccache or keytab is used.
- KrbConfigConfigMap *corev1.ConfigMapKeySelector `json:"krbConfigConfigMap,omitempty"`
-
- // KrbServicePrincipalName is the principal name of Kerberos service
- // It must be set if either ccache or keytab is used.
- KrbServicePrincipalName string `json:"krbServicePrincipalName,omitempty"`
-}
-
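-// parseEventSource unmarshals the YAML event source definition into a GatewayConfig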
-func parseEventSource(eventSource string) (interface{}, error) {
- var f *GatewayConfig
- err := yaml.Unmarshal([]byte(eventSource), &f)
- if err != nil {
- return nil, err
- }
- return f, err
-}
-
-// Validate validates GatewayClientConfig
-func (c *GatewayClientConfig) Validate() error {
- if len(c.Addresses) == 0 {
- return errors.New("addresses is required")
- }
-
- hasKrbCCache := c.KrbCCacheSecret != nil
- hasKrbKeytab := c.KrbKeytabSecret != nil
-
- if c.HDFSUser == "" && !hasKrbCCache && !hasKrbKeytab {
- return errors.New("either hdfsUser, krbCCacheSecret or krbKeytabSecret is required")
- }
- if hasKrbKeytab && (c.KrbServicePrincipalName == "" || c.KrbConfigConfigMap == nil || c.KrbUsername == "" || c.KrbRealm == "") {
- return errors.New("krbServicePrincipalName, krbConfigConfigMap, krbUsername and krbRealm are required with krbKeytabSecret")
- }
- if hasKrbCCache && (c.KrbServicePrincipalName == "" || c.KrbConfigConfigMap == nil) {
- return errors.New("krbServicePrincipalName and krbConfigConfigMap are required with krbCCacheSecret")
- }
-
- return nil
-}
diff --git a/gateways/community/hdfs/start.go b/gateways/community/hdfs/start.go
deleted file mode 100644
index fb68c90df3..0000000000
--- a/gateways/community/hdfs/start.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package hdfs
-
-import (
- "encoding/json"
- "fmt"
- "github.com/argoproj/argo-events/common"
- "os"
- "path/filepath"
- "regexp"
- "strings"
- "time"
-
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/common/fsevent"
- "github.com/argoproj/argo-events/gateways/common/naivewatcher"
- "github.com/colinmarc/hdfs"
-)
-
-// WatchableHDFS wraps hdfs.Client for naivewatcher
-type WatchableHDFS struct {
- hdfscli *hdfs.Client
-}
-
-// Walk walks a directory
-func (w *WatchableHDFS) Walk(root string, walkFn filepath.WalkFunc) error {
- return w.hdfscli.Walk(root, walkFn)
-}
-
-// GetFileID returns the file ID
-func (w *WatchableHDFS) GetFileID(fi os.FileInfo) interface{} {
- return fi.Name()
- // FIXME: Use HDFS File ID once it's exposed
- // https://github.com/colinmarc/hdfs/pull/171
- // return fi.Sys().(*hadoop_hdfs.HdfsFileStatusProto).GetFileID()
-}
-
-// StartEventSource starts an event source
-func (ese *EventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- defer gateways.Recover(eventSource.Name)
-
- ese.Log.WithField(common.LabelEventSource, eventSource.Name).Info("activating event source")
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- return err
- }
- gwc := config.(*GatewayConfig)
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
-
- go ese.listenEvents(gwc, eventSource, dataCh, errorCh, doneCh)
-
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, ese.Log)
-}
-
-func (ese *EventSourceExecutor) listenEvents(config *GatewayConfig, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- hdfsConfig, err := createHDFSConfig(ese.Clientset, ese.Namespace, &config.GatewayClientConfig)
- if err != nil {
- errorCh <- err
- return
- }
-
- hdfscli, err := createHDFSClient(hdfsConfig.Addresses, hdfsConfig.HDFSUser, hdfsConfig.KrbOptions)
- if err != nil {
- errorCh <- err
- return
- }
- defer hdfscli.Close()
-
- // create new watcher
- watcher, err := naivewatcher.NewWatcher(&WatchableHDFS{hdfscli: hdfscli})
- if err != nil {
- errorCh <- err
- return
- }
- defer watcher.Close()
-
- intervalDuration := 1 * time.Minute
- if config.CheckInterval != "" {
- d, err := time.ParseDuration(config.CheckInterval)
- if err != nil {
- errorCh <- err
- return
- }
- intervalDuration = d
- }
-
- err = watcher.Start(intervalDuration)
- if err != nil {
- errorCh <- err
- return
- }
-
-	// the directory to watch must already exist in HDFS; a non-existent directory cannot be watched
- err = watcher.Add(config.Directory)
- if err != nil {
- errorCh <- err
- return
- }
-
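-	// translate the configured operation type into a file-system event bitmask; incoming events are matched against it below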
- op := fsevent.NewOp(config.Type)
- var pathRegexp *regexp.Regexp
- if config.PathRegexp != "" {
- pathRegexp, err = regexp.Compile(config.PathRegexp)
- if err != nil {
- errorCh <- err
- return
- }
- }
-	log.Info("starting to watch HDFS notifications")
- for {
- select {
- case event, ok := <-watcher.Events:
- if !ok {
- log.Info("HDFS watcher has stopped")
- // watcher stopped watching file events
- errorCh <- fmt.Errorf("HDFS watcher stopped")
- return
- }
- matched := false
- relPath := strings.TrimPrefix(event.Name, config.Directory)
- if config.Path != "" && config.Path == relPath {
- matched = true
- } else if pathRegexp != nil && pathRegexp.MatchString(relPath) {
- matched = true
- }
- if matched && (op&event.Op != 0) {
- log.WithFields(
- map[string]interface{}{
- "event-type": event.Op.String(),
- "descriptor-name": event.Name,
- },
- ).Debug("HDFS event")
-
- payload, err := json.Marshal(event)
- if err != nil {
- errorCh <- err
- return
- }
- dataCh <- payload
- }
- case err := <-watcher.Errors:
- errorCh <- err
- return
- case <-doneCh:
- return
- }
- }
-}
diff --git a/gateways/community/hdfs/validate.go b/gateways/community/hdfs/validate.go
deleted file mode 100644
index 068245b543..0000000000
--- a/gateways/community/hdfs/validate.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package hdfs
-
-import (
- "context"
- "errors"
- "time"
-
- "github.com/argoproj/argo-events/gateways/common/fsevent"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *EventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateGatewayConfig)
-}
-
-func validateGatewayConfig(config interface{}) error {
- gwc := config.(*GatewayConfig)
- if gwc == nil {
- return gwcommon.ErrNilEventSource
- }
- if gwc.Type == "" {
- return errors.New("type is required")
- }
- op := fsevent.NewOp(gwc.Type)
- if op == 0 {
- return errors.New("type is invalid")
- }
- if gwc.CheckInterval != "" {
- _, err := time.ParseDuration(gwc.CheckInterval)
- if err != nil {
- return errors.New("failed to parse interval")
- }
- }
- err := gwc.WatchPathConfig.Validate()
- if err != nil {
- return err
- }
- err = gwc.GatewayClientConfig.Validate()
- return err
-}
diff --git a/gateways/community/hdfs/validate_test.go b/gateways/community/hdfs/validate_test.go
deleted file mode 100644
index 3c03f52eb0..0000000000
--- a/gateways/community/hdfs/validate_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package hdfs
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateEventSource(t *testing.T) {
- convey.Convey("Given a hdfs event source spec, parse it and make sure no error occurs", t, func() {
- ese := &EventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "hdfs.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/community/slack/config.go b/gateways/community/slack/config.go
deleted file mode 100644
index 7455d4ec19..0000000000
--- a/gateways/community/slack/config.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package slack
-
-import (
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/client-go/kubernetes"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// SlackEventSourceExecutor implements Eventing
-type SlackEventSourceExecutor struct {
- // Clientset is kubernetes client
- Clientset kubernetes.Interface
- // Namespace where gateway is deployed
- Namespace string
- Log *logrus.Logger
-}
-
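-// RouteConfig holds the runtime state of a webhook route for a slack event source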
-type RouteConfig struct {
- route *gwcommon.Route
- ses *slackEventSource
- token string
- signingSecret string
- clientset kubernetes.Interface
- namespace string
-}
-
-type slackEventSource struct {
- // Slack App signing secret
- SigningSecret *corev1.SecretKeySelector `json:"signingSecret,omitempty"`
- // Token for URL verification handshake
- Token *corev1.SecretKeySelector `json:"token"`
- // Webhook
- Hook *gwcommon.Webhook `json:"hook"`
-}
-
-func parseEventSource(es string) (interface{}, error) {
- var n *slackEventSource
- err := yaml.Unmarshal([]byte(es), &n)
- if err != nil {
- return nil, err
- }
- return n, nil
-}
diff --git a/gateways/community/slack/validate.go b/gateways/community/slack/validate.go
deleted file mode 100644
index 4c9637d82a..0000000000
--- a/gateways/community/slack/validate.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package slack
-
-import (
- "context"
- "fmt"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *SlackEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateSlack)
-}
-
-func validateSlack(config interface{}) error {
- sc := config.(*slackEventSource)
- if sc == nil {
- return gwcommon.ErrNilEventSource
- }
- if sc.Token == nil {
- return fmt.Errorf("token not provided")
- }
- return gwcommon.ValidateWebhook(sc.Hook)
-}
diff --git a/gateways/community/slack/validate_test.go b/gateways/community/slack/validate_test.go
deleted file mode 100644
index f9b1927279..0000000000
--- a/gateways/community/slack/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package slack
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestSlackEventSource(t *testing.T) {
- convey.Convey("Given a slack event source spec, parse it and make sure no error occurs", t, func() {
- ese := &SlackEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "slack.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/community/storagegrid/cmd/main.go b/gateways/community/storagegrid/cmd/main.go
deleted file mode 100644
index 8e066c88eb..0000000000
--- a/gateways/community/storagegrid/cmd/main.go
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/community/storagegrid"
-)
-
-func main() {
- gateways.StartGateway(&storagegrid.StorageGridEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- })
-}
diff --git a/gateways/community/storagegrid/config_test.go b/gateways/community/storagegrid/config_test.go
deleted file mode 100644
index dac18610eb..0000000000
--- a/gateways/community/storagegrid/config_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package storagegrid
-
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-var es = `
-hook:
- endpoint: "/"
- port: "8080"
- url: "testurl"
-events:
- - "ObjectCreated:Put"
-filter:
- suffix: ".txt"
- prefix: "hello-"
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a storage grid event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*storageGridEventSource)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/community/storagegrid/start.go b/gateways/community/storagegrid/start.go
deleted file mode 100644
index 0ae6320795..0000000000
--- a/gateways/community/storagegrid/start.go
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package storagegrid
-
-import (
- "encoding/json"
- "io/ioutil"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-	"github.com/google/uuid"
-	"github.com/joncalhoun/qson"
-)
-
-var (
- helper = gwcommon.NewWebhookHelper()
-
-	respBody = `
-<PublishResponse xmlns="http://argoevents-sns-server/">
-    <PublishResult>
-        <MessageId>` + generateUUID().String() + `</MessageId>
-    </PublishResult>
-    <ResponseMetadata>
-        <RequestId>` + generateUUID().String() + `</RequestId>
-    </ResponseMetadata>
-</PublishResponse>` + "\n"
-)
-
-func init() {
- go gwcommon.InitRouteChannels(helper)
-}
-
-// generateUUID returns a new uuid
-func generateUUID() uuid.UUID {
- return uuid.New()
-}
-
-// filterEvent filters notification based on event filter in a gateway configuration
-func filterEvent(notification *storageGridNotification, sg *storageGridEventSource) bool {
- if sg.Events == nil {
- return true
- }
- for _, filterEvent := range sg.Events {
- if notification.Message.Records[0].EventName == filterEvent {
- return true
- }
- }
- return false
-}
-
-// filterName filters object key based on configured prefix and/or suffix
-func filterName(notification *storageGridNotification, sg *storageGridEventSource) bool {
- if sg.Filter == nil {
- return true
- }
- if sg.Filter.Prefix != "" && sg.Filter.Suffix != "" {
- return strings.HasPrefix(notification.Message.Records[0].S3.Object.Key, sg.Filter.Prefix) && strings.HasSuffix(notification.Message.Records[0].S3.Object.Key, sg.Filter.Suffix)
- }
- if sg.Filter.Prefix != "" {
- return strings.HasPrefix(notification.Message.Records[0].S3.Object.Key, sg.Filter.Prefix)
- }
- if sg.Filter.Suffix != "" {
- return strings.HasSuffix(notification.Message.Records[0].S3.Object.Key, sg.Filter.Suffix)
- }
- return true
-}
-
-func (rc *RouteConfig) GetRoute() *gwcommon.Route {
- return rc.route
-}
-
-// StartEventSource activates the event source and streams back events
-func (ese *StorageGridEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- log.Info("operating on event source")
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
- sges := config.(*storageGridEventSource)
-
- return gwcommon.ProcessRoute(&RouteConfig{
- route: &gwcommon.Route{
- Webhook: sges.Hook,
- EventSource: eventSource,
- Logger: ese.Log,
- StartCh: make(chan struct{}),
- },
- sges: sges,
- }, helper, eventStream)
-}
-
-func (rc *RouteConfig) PostStart() error {
- return nil
-}
-
-func (rc *RouteConfig) PostStop() error {
- return nil
-}
-
-// RouteHandler handles new route
-func (rc *RouteConfig) RouteHandler(writer http.ResponseWriter, request *http.Request) {
- r := rc.route
-
- log := r.Logger.WithFields(
- map[string]interface{}{
- common.LabelEventSource: r.EventSource.Name,
- common.LabelEndpoint: r.Webhook.Endpoint,
- common.LabelPort: r.Webhook.Port,
- common.LabelHTTPMethod: r.Webhook.Method,
- })
-
- if !helper.ActiveEndpoints[r.Webhook.Endpoint].Active {
- log.Warn("inactive route")
- common.SendErrorResponse(writer, "")
- return
- }
-
- log.Info("received a request")
- body, err := ioutil.ReadAll(request.Body)
- if err != nil {
- log.WithError(err).Error("failed to parse request body")
- common.SendErrorResponse(writer, "")
- return
- }
-
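-	// a HEAD request is answered with an empty body; other methods receive the canned response below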
- switch request.Method {
- case http.MethodHead:
- respBody = ""
- }
-	// headers must be set before WriteHeader is called
-	writer.Header().Add("Content-Type", "text/plain")
-	writer.WriteHeader(http.StatusOK)
-	writer.Write([]byte(respBody))
-
-	// the notification received from StorageGrid is URL-encoded.
- parsedURL, err := url.QueryUnescape(string(body))
- if err != nil {
- log.WithError(err).Error("failed to unescape request body url")
- return
- }
- b, err := qson.ToJSON(parsedURL)
- if err != nil {
-		log.WithError(err).Error("failed to convert request body to JSON")
- return
- }
-
- var notification *storageGridNotification
-	err = json.Unmarshal(b, &notification)
- if err != nil {
- log.WithError(err).Error("failed to unmarshal request body")
- return
- }
-
- if filterEvent(notification, rc.sges) && filterName(notification, rc.sges) {
-		log.Info("new event received, dispatching to gateway client")
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint].DataCh <- b
- return
- }
-
- log.Warn("discarding notification since it did not pass all filters")
-}
diff --git a/gateways/community/storagegrid/validate.go b/gateways/community/storagegrid/validate.go
deleted file mode 100644
index 45c1e2b380..0000000000
--- a/gateways/community/storagegrid/validate.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package storagegrid
-
-import (
- "context"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *StorageGridEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateStorageGrid)
-}
-
-func validateStorageGrid(config interface{}) error {
- sg := config.(*storageGridEventSource)
- if sg == nil {
- return gwcommon.ErrNilEventSource
- }
- return gwcommon.ValidateWebhook(sg.Hook)
-}
diff --git a/gateways/community/storagegrid/validate_test.go b/gateways/community/storagegrid/validate_test.go
deleted file mode 100644
index 1cdc2a367e..0000000000
--- a/gateways/community/storagegrid/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package storagegrid
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateStorageGridEventSource(t *testing.T) {
- convey.Convey("Given a storage grid event source spec, parse it and make sure no error occurs", t, func() {
- ese := &StorageGridEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "storage-grid.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/config.go b/gateways/config.go
deleted file mode 100644
index 5e20028d00..0000000000
--- a/gateways/config.go
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gateways
-
-import (
- "context"
- "fmt"
- "github.com/sirupsen/logrus"
- "os"
-
- "github.com/nats-io/go-nats"
-
- "github.com/argoproj/argo-events/common"
- pc "github.com/argoproj/argo-events/pkg/apis/common"
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- gwclientset "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned"
- snats "github.com/nats-io/go-nats-streaming"
- "google.golang.org/grpc"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/rest"
-)
-
-// GatewayConfig holds the runtime configuration for a gateway
-type GatewayConfig struct {
-	// Log provides a fast and simple logger dedicated to JSON output
- Log *logrus.Logger
- // Clientset is client for kubernetes API
- Clientset kubernetes.Interface
- // Name is gateway name
- Name string
- // Namespace is namespace for the gateway to run inside
- Namespace string
- // KubeConfig rest client config
- KubeConfig *rest.Config
- // gateway holds Gateway custom resource
- gw *v1alpha1.Gateway
- // gwClientset is gateway clientset
- gwcs gwclientset.Interface
- // updated indicates whether gateway resource is updated
- updated bool
- // serverPort is gateway server port to listen events from
- serverPort string
- // registeredConfigs stores information about current event sources that are running in the gateway
- registeredConfigs map[string]*EventSourceContext
-	// configName is the name of the configmap that contains the event sources to run for the gateway
- configName string
- // controllerInstanceId is instance ID of the gateway controller
- controllerInstanceID string
- // StatusCh is used to communicate the status of an event source
- StatusCh chan EventSourceStatus
- // natsConn is the standard nats connection used to publish events to cluster. Only used if dispatch protocol is NATS
- natsConn *nats.Conn
- // natsStreamingConn is the nats connection used for streaming.
- natsStreamingConn snats.Conn
-	// sensorHttpPort is the port of the HTTP server running in the sensor that listens for events. Only used if dispatch protocol is HTTP
- sensorHttpPort string
-}
-
-// EventSourceContext contains the information needed for the gateway to run an event source.
-type EventSourceContext struct {
- // Source holds the actual event source
- Source *EventSource
- // Ctx contains context for the connection
- Ctx context.Context
- // Cancel upon invocation cancels the connection context
- Cancel context.CancelFunc
- // Client is grpc client
- Client EventingClient
- // Conn is grpc connection
- Conn *grpc.ClientConn
-}
-
-// GatewayEvent is the internal representation of an event.
-type GatewayEvent struct {
- // Src is source of event
- Src string `json:"src"`
- // Payload contains event data
- Payload []byte `json:"payload"`
-}
-
-// NewGatewayConfiguration returns a new gateway configuration
-func NewGatewayConfiguration() *GatewayConfig {
- kubeConfig, _ := os.LookupEnv(common.EnvVarKubeConfig)
- restConfig, err := common.GetClientConfig(kubeConfig)
- if err != nil {
- panic(err)
- }
- name, ok := os.LookupEnv(common.EnvVarGatewayName)
- if !ok {
- panic("gateway name not provided")
- }
- namespace, ok := os.LookupEnv(common.EnvVarGatewayNamespace)
- if !ok {
- panic("no namespace provided")
- }
- configName, ok := os.LookupEnv(common.EnvVarGatewayEventSourceConfigMap)
- if !ok {
- panic("gateway processor configmap is not provided")
- }
- controllerInstanceID, ok := os.LookupEnv(common.EnvVarGatewayControllerInstanceID)
- if !ok {
- panic("gateway controller instance ID is not provided")
- }
- serverPort, ok := os.LookupEnv(common.EnvVarGatewayServerPort)
- if !ok {
- panic("server port is not provided")
- }
-
- clientset := kubernetes.NewForConfigOrDie(restConfig)
- gwcs := gwclientset.NewForConfigOrDie(restConfig)
- gw, err := gwcs.ArgoprojV1alpha1().Gateways(namespace).Get(name, metav1.GetOptions{})
- if err != nil {
- panic(err)
- }
-
- gc := &GatewayConfig{
- Log: common.NewArgoEventsLogger().WithFields(
- map[string]interface{}{
- common.LabelGatewayName: gw.Name,
- common.LabelNamespace: gw.Namespace,
- }).Logger,
- Clientset: clientset,
- Namespace: namespace,
- Name: name,
- KubeConfig: restConfig,
- registeredConfigs: make(map[string]*EventSourceContext),
- configName: configName,
- gwcs: gwcs,
- gw: gw,
- controllerInstanceID: controllerInstanceID,
- serverPort: serverPort,
- StatusCh: make(chan EventSourceStatus),
- }
-
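-	// set up the transport used to dispatch events to the sensor: plain HTTP, or NATS (standard or streaming)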
- switch gw.Spec.EventProtocol.Type {
- case pc.HTTP:
- gc.sensorHttpPort = gw.Spec.EventProtocol.Http.Port
- case pc.NATS:
- if gc.natsConn, err = nats.Connect(gw.Spec.EventProtocol.Nats.URL); err != nil {
- panic(fmt.Errorf("failed to obtain NATS standard connection. err: %+v", err))
- }
- gc.Log.WithField(common.LabelURL, gw.Spec.EventProtocol.Nats.URL).Info("connected to nats service")
-
- if gc.gw.Spec.EventProtocol.Nats.Type == pc.Streaming {
- gc.natsStreamingConn, err = snats.Connect(gc.gw.Spec.EventProtocol.Nats.ClusterId, gc.gw.Spec.EventProtocol.Nats.ClientId, snats.NatsConn(gc.natsConn))
- if err != nil {
- panic(fmt.Errorf("failed to obtain NATS streaming connection. err: %+v", err))
- }
- gc.Log.WithField(common.LabelURL, gw.Spec.EventProtocol.Nats.URL).Info("nats streaming connection successful")
- }
- }
- return gc
-}
diff --git a/gateways/core/artifact/Dockerfile b/gateways/core/artifact/Dockerfile
deleted file mode 100644
index 7e1d5661d1..0000000000
--- a/gateways/core/artifact/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM centos:7
-COPY dist/artifact-gateway /bin/
-ENTRYPOINT [ "/bin/artifact-gateway" ]
\ No newline at end of file
diff --git a/gateways/core/artifact/config.go b/gateways/core/artifact/config.go
deleted file mode 100644
index db8b90c06d..0000000000
--- a/gateways/core/artifact/config.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package artifact
-
-import (
- apicommon "github.com/argoproj/argo-events/pkg/apis/common"
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
- "k8s.io/client-go/kubernetes"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// S3EventSourceExecutor implements Eventing
-type S3EventSourceExecutor struct {
- Log *logrus.Logger
- // Clientset is kubernetes client
- Clientset kubernetes.Interface
- // Namespace where gateway is deployed
- Namespace string
-}
-
-func parseEventSource(config string) (interface{}, error) {
- var a *apicommon.S3Artifact
- err := yaml.Unmarshal([]byte(config), &a)
- if err != nil {
- return nil, err
- }
- return a, err
-}
diff --git a/gateways/core/artifact/config_test.go b/gateways/core/artifact/config_test.go
deleted file mode 100644
index ac73c722aa..0000000000
--- a/gateways/core/artifact/config_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package artifact
-
-import (
- "testing"
-
- apicommon "github.com/argoproj/argo-events/pkg/apis/common"
- "github.com/smartystreets/goconvey/convey"
-)
-
-var es = `
-bucket:
- name: input
-endpoint: minio-service.argo-events:9000
-event: s3:ObjectCreated:Put
-filter:
- prefix: ""
- suffix: ""
-insecure: true
-accessKey:
- key: accesskey
- name: artifacts-minio
-secretKey:
- key: secretkey
- name: artifacts-minio
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a artifact event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*apicommon.S3Artifact)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/core/artifact/start.go b/gateways/core/artifact/start.go
deleted file mode 100644
index d163a6a812..0000000000
--- a/gateways/core/artifact/start.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package artifact
-
-import (
- "encoding/json"
- "github.com/argoproj/argo-events/common"
-
- "github.com/argoproj/argo-events/gateways"
- apicommon "github.com/argoproj/argo-events/pkg/apis/common"
- "github.com/argoproj/argo-events/store"
- "github.com/minio/minio-go"
-)
-
-// StartEventSource activates an event source and streams back events
-func (ese *S3EventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
- log.Info("activating event source")
-
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
-
- go ese.listenEvents(config.(*apicommon.S3Artifact), eventSource, dataCh, errorCh, doneCh)
-
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, ese.Log)
-}
-
-// listenEvents listens to minio bucket notifications
-func (ese *S3EventSourceExecutor) listenEvents(a *apicommon.S3Artifact, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- defer gateways.Recover(eventSource.Name)
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- log.Info("operating on event source")
-
- log.Info("retrieving access and secret key")
- // retrieve access key id and secret access key
- accessKey, err := store.GetSecrets(ese.Clientset, ese.Namespace, a.AccessKey.Name, a.AccessKey.Key)
- if err != nil {
- errorCh <- err
- return
- }
- secretKey, err := store.GetSecrets(ese.Clientset, ese.Namespace, a.SecretKey.Name, a.SecretKey.Key)
- if err != nil {
- errorCh <- err
- return
- }
-
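-	// minio.New takes a "secure" flag, hence the negation of the Insecure setting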
- minioClient, err := minio.New(a.Endpoint, accessKey, secretKey, !a.Insecure)
- if err != nil {
- errorCh <- err
- return
- }
-
- log.Info("starting to listen to bucket notifications")
- for notification := range minioClient.ListenBucketNotification(a.Bucket.Name, a.Filter.Prefix, a.Filter.Suffix, a.Events, doneCh) {
- if notification.Err != nil {
- errorCh <- notification.Err
- return
- }
- payload, err := json.Marshal(notification.Records[0])
- if err != nil {
- errorCh <- err
- return
- }
- dataCh <- payload
- }
-}
diff --git a/gateways/core/artifact/validate.go b/gateways/core/artifact/validate.go
deleted file mode 100644
index c73f24fd52..0000000000
--- a/gateways/core/artifact/validate.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package artifact
-
-import (
- "context"
- "fmt"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- apicommon "github.com/argoproj/argo-events/pkg/apis/common"
- "github.com/minio/minio-go"
-)
-
-// ValidateEventSource validates a s3 event source
-func (ese *S3EventSourceExecutor) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(eventSource, ArgoEventsEventSourceVersion, parseEventSource, validateArtifact)
-}
-
-// validates an artifact
-func validateArtifact(config interface{}) error {
- a := config.(*apicommon.S3Artifact)
- if a == nil {
- return gwcommon.ErrNilEventSource
- }
- if a.AccessKey == nil {
- return fmt.Errorf("access key can't be empty")
- }
- if a.SecretKey == nil {
- return fmt.Errorf("secret key can't be empty")
- }
- if a.Endpoint == "" {
- return fmt.Errorf("endpoint url can't be empty")
- }
- if a.Bucket != nil && a.Bucket.Name == "" {
- return fmt.Errorf("bucket name can't be empty")
- }
- if a.Events != nil {
- for _, event := range a.Events {
- if minio.NotificationEventType(event) == "" {
- return fmt.Errorf("unknown event %s", event)
- }
- }
- }
- return nil
-}
diff --git a/gateways/core/artifact/validate_test.go b/gateways/core/artifact/validate_test.go
deleted file mode 100644
index ca7b7a3d90..0000000000
--- a/gateways/core/artifact/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package artifact
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateS3EventSource(t *testing.T) {
- convey.Convey("Given a S3 artifact spec, parse the spec and make sure no error occurs", t, func() {
- ese := &S3EventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "artifact.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/core/calendar/config.go b/gateways/core/calendar/config.go
deleted file mode 100644
index dc9fff509b..0000000000
--- a/gateways/core/calendar/config.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package calendar
-
-import (
- "encoding/json"
- "github.com/sirupsen/logrus"
- "time"
-
- "github.com/ghodss/yaml"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// CalendarEventSourceExecutor implements Eventing
-type CalendarEventSourceExecutor struct {
- Log *logrus.Logger
-}
-
-// calSchedule describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed.
-// Schedule takes precedence over interval; interval takes precedence over recurrence
-// +k8s:openapi-gen=true
-type calSchedule struct {
- // Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron
- Schedule string `json:"schedule"`
-
- // Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h...
- Interval string `json:"interval"`
-
- // List of RRULE, RDATE and EXDATE lines for a recurring event, as specified in RFC5545.
- // RRULE is a recurrence rule which defines a repeating pattern for recurring events.
- // RDATE defines the list of DATE-TIME values for recurring events.
- // EXDATE defines the list of DATE-TIME exceptions for recurring events.
-	// the combination of these rules and dates forms a set of date-times.
- // NOTE: functionality currently only supports EXDATEs, but in the future could be expanded.
- Recurrence []string `json:"recurrence,omitempty"`
-
- // Timezone in which to run the schedule
- // +optional
- Timezone string `json:"timezone,omitempty"`
-
- // UserPayload will be sent to sensor as extra data once the event is triggered
- // +optional
- UserPayload *json.RawMessage `json:"userPayload,omitempty"`
-}
-
-// calResponse is the event payload that is sent as response to sensor
-type calResponse struct {
- // EventTime is time at which event occurred
- EventTime time.Time `json:"eventTime"`
-
- // UserPayload if any
- UserPayload *json.RawMessage `json:"userPayload"`
-}
-
-func parseEventSource(eventSource string) (interface{}, error) {
- var c *calSchedule
- err := yaml.Unmarshal([]byte(eventSource), &c)
- if err != nil {
- return nil, err
- }
- return c, err
-}
diff --git a/gateways/core/calendar/config_test.go b/gateways/core/calendar/config_test.go
deleted file mode 100644
index 0d94761cc5..0000000000
--- a/gateways/core/calendar/config_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package calendar
-
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-var es = `
-interval: 2s
-userPayload: "{\r\n\"hello\": \"world\"\r\n}"
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a calendar event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*calSchedule)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/core/calendar/start.go b/gateways/core/calendar/start.go
deleted file mode 100644
index a90e088a8f..0000000000
--- a/gateways/core/calendar/start.go
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package calendar
-
-import (
- "encoding/json"
- "fmt"
- "time"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- cronlib "github.com/robfig/cron"
-)
-
-// Next is a function to compute the next event time from a given time
-type Next func(time.Time) time.Time
-
-// StartEventSource starts an event source
-func (ese *CalendarEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
- log.Info("activating event source")
-
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
-
- go ese.listenEvents(config.(*calSchedule), eventSource, dataCh, errorCh, doneCh)
-
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, ese.Log)
-}
-
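-// resolveSchedule builds a cron schedule from either the cron expression or the fixed interval; the cron expression takes precedence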
-func resolveSchedule(cal *calSchedule) (cronlib.Schedule, error) {
- if cal.Schedule != "" {
- // standard cron expression
- specParser := cronlib.NewParser(cronlib.Minute | cronlib.Hour | cronlib.Dom | cronlib.Month | cronlib.Dow)
- schedule, err := specParser.Parse(cal.Schedule)
- if err != nil {
- return nil, fmt.Errorf("failed to parse schedule %s from calendar event. Cause: %+v", cal.Schedule, err.Error())
- }
- return schedule, nil
- } else if cal.Interval != "" {
- intervalDuration, err := time.ParseDuration(cal.Interval)
- if err != nil {
- return nil, fmt.Errorf("failed to parse interval %s from calendar event. Cause: %+v", cal.Interval, err.Error())
- }
- schedule := cronlib.ConstantDelaySchedule{Delay: intervalDuration}
- return schedule, nil
- } else {
- return nil, fmt.Errorf("calendar event must contain either a schedule or interval")
- }
-}
-
-// listenEvents fires an event when schedule is passed.
-func (ese *CalendarEventSourceExecutor) listenEvents(cal *calSchedule, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- defer gateways.Recover(eventSource.Name)
-
- schedule, err := resolveSchedule(cal)
- if err != nil {
- errorCh <- err
- return
- }
-
- exDates, err := common.ParseExclusionDates(cal.Recurrence)
- if err != nil {
- errorCh <- err
- return
- }
-
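-	// next wraps the resolved schedule and recursively skips any excluded dates (EXDATEs)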
- var next Next
- next = func(last time.Time) time.Time {
- nextT := schedule.Next(last)
- nextYear := nextT.Year()
- nextMonth := nextT.Month()
- nextDay := nextT.Day()
- for _, exDate := range exDates {
- // if exDate == nextEvent, then we need to skip this and get the next
- if exDate.Year() == nextYear && exDate.Month() == nextMonth && exDate.Day() == nextDay {
- return next(nextT)
- }
- }
- return nextT
- }
-
- lastT := time.Now()
- var location *time.Location
- if cal.Timezone != "" {
- location, err = time.LoadLocation(cal.Timezone)
- if err != nil {
- errorCh <- err
- return
- }
- lastT = lastT.In(location)
- }
-
- for {
- t := next(lastT)
- timer := time.After(time.Until(t))
- ese.Log.WithFields(
- map[string]interface{}{
- common.LabelEventSource: eventSource.Name,
- common.LabelTime: t.UTC().String(),
- }).Info("expected next calendar event")
- select {
- case tx := <-timer:
- lastT = tx
- if location != nil {
- lastT = lastT.In(location)
- }
- response := &calResponse{
- EventTime: tx,
- UserPayload: cal.UserPayload,
- }
- payload, err := json.Marshal(response)
- if err != nil {
- errorCh <- err
- return
- }
- dataCh <- payload
- case <-doneCh:
- return
- }
- }
-}
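The calendar gateway removed above resolves either a standard five-field cron expression or a Go duration string into a `cronlib.Schedule`, then walks `Schedule.Next` while skipping exclusion dates. Below is a minimal, self-contained sketch of that resolution step using the same `github.com/robfig/cron` package the deleted code imported; the sample spec strings are illustrative only.

```go
package main

import (
	"fmt"
	"time"

	cronlib "github.com/robfig/cron"
)

// resolve mirrors the removed resolveSchedule: a standard five-field cron
// expression takes precedence, otherwise a Go duration string is treated as
// a constant-delay schedule.
func resolve(schedule, interval string) (cronlib.Schedule, error) {
	if schedule != "" {
		parser := cronlib.NewParser(cronlib.Minute | cronlib.Hour | cronlib.Dom | cronlib.Month | cronlib.Dow)
		return parser.Parse(schedule)
	}
	if interval != "" {
		d, err := time.ParseDuration(interval)
		if err != nil {
			return nil, err
		}
		return cronlib.ConstantDelaySchedule{Delay: d}, nil
	}
	return nil, fmt.Errorf("either schedule or interval must be set")
}

func main() {
	// "*/5 * * * *" is an illustrative spec: fire every five minutes.
	s, err := resolve("*/5 * * * *", "")
	if err != nil {
		panic(err)
	}
	fmt.Println("next event at:", s.Next(time.Now()))
}
```

The deleted `listenEvents` loop then sleeps until the next fire time via `time.After(time.Until(t))` and emits a JSON payload on each tick.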
diff --git a/gateways/core/calendar/validate.go b/gateways/core/calendar/validate.go
deleted file mode 100644
index 9d90e20fe5..0000000000
--- a/gateways/core/calendar/validate.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package calendar
-
-import (
- "context"
- "fmt"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *CalendarEventSourceExecutor) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(eventSource, ArgoEventsEventSourceVersion, parseEventSource, validateSchedule)
-}
-
-func validateSchedule(config interface{}) error {
- cal := config.(*calSchedule)
- if cal == nil {
- return gwcommon.ErrNilEventSource
- }
- if cal.Schedule == "" && cal.Interval == "" {
- return fmt.Errorf("must have either schedule or interval")
- }
- if _, err := resolveSchedule(cal); err != nil {
- return err
- }
- return nil
-}
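Each deleted `validate.go` in this change follows the same shape: `gwcommon.ValidateGatewayEventSource` is handed the package's `parseEventSource` together with a type-specific check. The sketch below shows that parse-then-validate composition in isolation; `validateEventSource` here is a local stand-in, not the real `gateways/common` helper, and the JSON tag names on `calSchedule` are assumptions.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ghodss/yaml"
)

// calSchedule is a local stand-in; the real field tags live outside this hunk.
type calSchedule struct {
	Schedule string `json:"schedule"`
	Interval string `json:"interval"`
}

// validateEventSource sketches the shared pattern: parse the raw YAML first,
// then run a type-specific check on the result.
func validateEventSource(data string, validate func(*calSchedule) error) error {
	var cal *calSchedule
	if err := yaml.Unmarshal([]byte(data), &cal); err != nil {
		return err
	}
	return validate(cal)
}

func main() {
	err := validateEventSource("interval: 10s\n", func(c *calSchedule) error {
		if c == nil {
			return errors.New("nil event source")
		}
		if c.Schedule == "" && c.Interval == "" {
			return errors.New("must have either schedule or interval")
		}
		return nil
	})
	fmt.Println("valid:", err == nil)
}
```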
diff --git a/gateways/core/calendar/validate_test.go b/gateways/core/calendar/validate_test.go
deleted file mode 100644
index 6f54d616b0..0000000000
--- a/gateways/core/calendar/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package calendar
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateCalendarEventSource(t *testing.T) {
- convey.Convey("Given a calendar spec, parse it and make sure no error occurs", t, func() {
- ese := &CalendarEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "calendar.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/core/file/config.go b/gateways/core/file/config.go
deleted file mode 100644
index 3170923287..0000000000
--- a/gateways/core/file/config.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package file
-
-import (
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// FileEventSourceExecutor implements Eventing
-type FileEventSourceExecutor struct {
- Log *logrus.Logger
-}
-
-// fileWatcher contains configuration information for this gateway
-// +k8s:openapi-gen=true
-type fileWatcher struct {
- gwcommon.WatchPathConfig `json:",inline"`
-
- // Type of file operations to watch
- // Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information
- Type string `json:"type"`
-}
-
-func parseEventSource(eventSource string) (interface{}, error) {
- var f *fileWatcher
- err := yaml.Unmarshal([]byte(eventSource), &f)
- if err != nil {
- return nil, err
- }
- return f, err
-}
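`fileWatcher` embeds `gwcommon.WatchPathConfig` with `json:",inline"`, so the watch-path keys sit at the top level of the event-source YAML next to `type`. A small sketch of that inlining follows, with a local `WatchPathConfig` stand-in whose `directory`/`path`/`pathRegexp` field names are assumed from how the deleted `start.go` reads them.

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// WatchPathConfig is a local stand-in for gwcommon.WatchPathConfig; the field
// names are assumed from the deleted start.go (fwc.Directory, fwc.Path,
// fwc.PathRegexp), not copied from the real type.
type WatchPathConfig struct {
	Directory  string `json:"directory"`
	Path       string `json:"path,omitempty"`
	PathRegexp string `json:"pathRegexp,omitempty"`
}

type fileWatcher struct {
	WatchPathConfig `json:",inline"`
	Type            string `json:"type"`
}

func main() {
	// Inlining means the watch-path keys appear at the top level of the YAML,
	// matching the sample spec in the deleted config_test.go.
	spec := "directory: /bin/\npath: x.txt\ntype: CREATE\n"

	var fw *fileWatcher
	if err := yaml.Unmarshal([]byte(spec), &fw); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *fw)
}
```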
diff --git a/gateways/core/file/config_test.go b/gateways/core/file/config_test.go
deleted file mode 100644
index b4be30ff2c..0000000000
--- a/gateways/core/file/config_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package file
-
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-var es = `
-directory: "/bin/"
-type: CREATE
-path: x.txt
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a file event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*fileWatcher)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/core/file/start.go b/gateways/core/file/start.go
deleted file mode 100644
index 6782949518..0000000000
--- a/gateways/core/file/start.go
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package file
-
-import (
- "encoding/json"
- "fmt"
- "github.com/argoproj/argo-events/common"
- "regexp"
- "strings"
-
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/common/fsevent"
- "github.com/fsnotify/fsnotify"
-)
-
-// StartEventSource starts an event source
-func (ese *FileEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
- log.Info("activating event source")
-
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
-
- go ese.listenEvents(config.(*fileWatcher), eventSource, dataCh, errorCh, doneCh)
-
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, ese.Log)
-}
-
-func (ese *FileEventSourceExecutor) listenEvents(fwc *fileWatcher, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- // create new fs watcher
- watcher, err := fsnotify.NewWatcher()
- if err != nil {
- errorCh <- err
- return
- }
- defer watcher.Close()
-
- // file descriptor to watch must be available in file system. You can't watch an fs descriptor that is not present.
- err = watcher.Add(fwc.Directory)
- if err != nil {
- errorCh <- err
- return
- }
-
- var pathRegexp *regexp.Regexp
- if fwc.PathRegexp != "" {
- pathRegexp, err = regexp.Compile(fwc.PathRegexp)
- if err != nil {
- errorCh <- err
- return
- }
- }
-
- log.Info("starting to watch file notifications")
- for {
- select {
- case event, ok := <-watcher.Events:
- if !ok {
- log.Info("fs watcher has stopped")
- // watcher stopped watching file events
- errorCh <- fmt.Errorf("fs watcher stopped")
- return
- }
- // fwc.Path == event.Name is required because we don't want to send event when .swp files are created
- matched := false
- relPath := strings.TrimPrefix(event.Name, fwc.Directory)
- if fwc.Path != "" && fwc.Path == relPath {
- matched = true
- } else if pathRegexp != nil && pathRegexp.MatchString(relPath) {
- matched = true
- }
- if matched && fwc.Type == event.Op.String() {
- log.WithFields(
- map[string]interface{}{
- "event-type": event.Op.String(),
- "descriptor-name": event.Name,
- },
- ).Debug("fs event")
-
- // Assume fsnotify event has the same Op spec of our file event
- fileEvent := fsevent.Event{Name: event.Name, Op: fsevent.NewOp(event.Op.String())}
- payload, err := json.Marshal(fileEvent)
- if err != nil {
- errorCh <- err
- return
- }
- dataCh <- payload
- }
- case err := <-watcher.Errors:
- errorCh <- err
- return
- case <-doneCh:
- return
- }
- }
-}
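The deleted file gateway wraps `fsnotify`: it watches a directory, trims the directory prefix off each event name, and forwards only events whose relative path matches the configured exact path or regular expression and whose op matches the configured type. Below is a trimmed-down sketch of that matching loop; the directory, path, and op values are illustrative.

```go
package main

import (
	"fmt"
	"log"
	"regexp"
	"strings"

	"github.com/fsnotify/fsnotify"
)

// watch is a reduced version of the removed listenEvents loop: it reports
// events whose relative path matches either an exact path or a regular
// expression, and whose op string matches opType.
func watch(dir, exactPath, pathPattern, opType string) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer watcher.Close()

	// The directory must already exist; fsnotify cannot watch a missing path.
	if err := watcher.Add(dir); err != nil {
		return err
	}

	var pathRegexp *regexp.Regexp
	if pathPattern != "" {
		if pathRegexp, err = regexp.Compile(pathPattern); err != nil {
			return err
		}
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return fmt.Errorf("fs watcher stopped")
			}
			relPath := strings.TrimPrefix(event.Name, dir)
			matched := (exactPath != "" && exactPath == relPath) ||
				(pathRegexp != nil && pathRegexp.MatchString(relPath))
			if matched && event.Op.String() == opType {
				log.Printf("matched %s on %s", event.Op, event.Name)
			}
		case err := <-watcher.Errors:
			return err
		}
	}
}

func main() {
	// "/tmp/", "x.txt", and "CREATE" are illustrative arguments.
	if err := watch("/tmp/", "x.txt", "", "CREATE"); err != nil {
		log.Fatal(err)
	}
}
```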
diff --git a/gateways/core/file/validate.go b/gateways/core/file/validate.go
deleted file mode 100644
index 4a1d27fa79..0000000000
--- a/gateways/core/file/validate.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package file
-
-import (
- "context"
- "fmt"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *FileEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateFileWatcher)
-}
-
-func validateFileWatcher(config interface{}) error {
- fwc := config.(*fileWatcher)
- if fwc == nil {
- return gwcommon.ErrNilEventSource
- }
- if fwc.Type == "" {
- return fmt.Errorf("type must be specified")
- }
- err := fwc.WatchPathConfig.Validate()
- return err
-}
diff --git a/gateways/core/file/validate_test.go b/gateways/core/file/validate_test.go
deleted file mode 100644
index f97a61420b..0000000000
--- a/gateways/core/file/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package file
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateFileEventSource(t *testing.T) {
- convey.Convey("Given a file event source spec, parse it and make sure no error occurs", t, func() {
- ese := &FileEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "file.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/core/resource/config.go b/gateways/core/resource/config.go
deleted file mode 100644
index 27dc40afc4..0000000000
--- a/gateways/core/resource/config.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resource
-
-import (
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/rest"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-type EventType string
-
-const (
- ADD EventType = "ADD"
- UPDATE EventType = "UPDATE"
- DELETE EventType = "DELETE"
-)
-
-// InformerEvent holds event generated from resource state change
-type InformerEvent struct {
- Obj interface{}
- OldObj interface{}
- Type EventType
-}
-
-// ResourceEventSourceExecutor implements Eventing
-type ResourceEventSourceExecutor struct {
- Log *logrus.Logger
- // K8RestConfig is kubernetes cluster config
- K8RestConfig *rest.Config
-}
-
-// resource refers to a dependency on a k8s resource.
-type resource struct {
- // Namespace where resource is deployed
- Namespace string `json:"namespace"`
- // Filter is applied on the metadata of the resource
- Filter *ResourceFilter `json:"filter,omitempty"`
- // Group of the resource
- metav1.GroupVersionResource `json:",inline"`
- // Type is the event type.
- // If not provided, the gateway will watch all events for a resource.
- Type EventType `json:"type,omitempty"`
-}
-
-// ResourceFilter contains K8 ObjectMeta information to further filter resource event objects
-type ResourceFilter struct {
- Prefix string `json:"prefix,omitempty"`
- Labels map[string]string `json:"labels,omitempty"`
- Annotations map[string]string `json:"annotations,omitempty"`
- Fields map[string]string `json:"fields,omitempty"`
- CreatedBy metav1.Time `json:"createdBy,omitempty"`
-}
-
-func parseEventSource(es string) (interface{}, error) {
- var r *resource
- err := yaml.Unmarshal([]byte(es), &r)
- if err != nil {
- return nil, err
- }
- return r, err
-}
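`ResourceFilter` narrows resource events by name prefix, labels, annotations, fields, and creation time. The code that applies the filter lives in the gateway's `start.go`, which is not part of this hunk, so the sketch below is only one plausible way a prefix-and-labels filter could be evaluated against object metadata, not the removed implementation.

```go
package main

import (
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// matchesFilter is an illustrative take on applying a name prefix and a label
// subset to object metadata; it is not the removed gateway's filtering code.
func matchesFilter(meta metav1.ObjectMeta, prefix string, labels map[string]string) bool {
	if prefix != "" && !strings.HasPrefix(meta.Name, prefix) {
		return false
	}
	for k, v := range labels {
		if meta.Labels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	obj := metav1.ObjectMeta{
		Name:   "my-workflow-abc123",
		Labels: map[string]string{"workflows.argoproj.io/phase": "Succeeded"},
	}
	fmt.Println(matchesFilter(obj, "my-workflow",
		map[string]string{"workflows.argoproj.io/phase": "Succeeded"}))
}
```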
diff --git a/gateways/core/resource/config_test.go b/gateways/core/resource/config_test.go
deleted file mode 100644
index 34a615799e..0000000000
--- a/gateways/core/resource/config_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resource
-
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-var es = `
-namespace: "argo-events"
-group: ""
-version: "v1"
-resource: "pods"
-filter:
- labels:
- workflows.argoproj.io/phase: Succeeded
- name: "my-workflow"
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a resource event source, parse it", t, func() {
- ps, err := parseEventSource(es)
-
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
-
- resource, ok := ps.(*resource)
- convey.So(ok, convey.ShouldEqual, true)
-
- convey.So(resource.Group, convey.ShouldEqual, "")
- convey.So(resource.Version, convey.ShouldEqual, "v1")
- convey.So(resource.Resource, convey.ShouldEqual, "pods")
- })
-}
diff --git a/gateways/core/resource/validate.go b/gateways/core/resource/validate.go
deleted file mode 100644
index 2b0a51f31b..0000000000
--- a/gateways/core/resource/validate.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resource
-
-import (
- "context"
- "fmt"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (executor *ResourceEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateResource)
-}
-
-func validateResource(config interface{}) error {
- res := config.(*resource)
- if res == nil {
- return gwcommon.ErrNilEventSource
- }
- if res.Version == "" {
- return fmt.Errorf("version must be specified")
- }
- if res.Resource == "" {
- return fmt.Errorf("resource must be specified")
- }
- return nil
-}
diff --git a/gateways/core/resource/validate_test.go b/gateways/core/resource/validate_test.go
deleted file mode 100644
index 120855a43a..0000000000
--- a/gateways/core/resource/validate_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resource
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateResourceEventSource(t *testing.T) {
- convey.Convey("Given a resource event source spec, parse it and make sure no error occurs", t, func() {
- ese := &ResourceEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "resource.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.Println(valid.Reason)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/core/stream/amqp/config.go b/gateways/core/stream/amqp/config.go
deleted file mode 100644
index cd5555ad16..0000000000
--- a/gateways/core/stream/amqp/config.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package amqp
-
-import (
- "github.com/argoproj/argo-events/common"
- "github.com/ghodss/yaml"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- amqplib "github.com/streadway/amqp"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// AMQPEventSourceExecutor implements Eventing
-type AMQPEventSourceExecutor struct {
- Log *logrus.Logger
-}
-
-// amqp contains configuration required to connect to rabbitmq service and process messages
-type amqp struct {
- // URL for rabbitmq service
- URL string `json:"url"`
- // ExchangeName is the exchange name
- // For more information, visit https://www.rabbitmq.com/tutorials/amqp-concepts.html
- ExchangeName string `json:"exchangeName"`
- // ExchangeType is rabbitmq exchange type
- ExchangeType string `json:"exchangeType"`
- // Routing key for bindings
- RoutingKey string `json:"routingKey"`
- // Backoff holds parameters applied to connection.
- Backoff *common.Backoff `json:"backoff,omitempty"`
- // Connection manages the serialization and deserialization of frames from IO
- // and dispatches the frames to the appropriate channel.
- conn *amqplib.Connection
- // Maximum number of events consumed from the queue per RatePeriod.
- RateLimit uint32 `json:"rateLimit,omitempty"`
- // Number of seconds between two consumptions.
- RatePeriod uint32 `json:"ratePeriod,omitempty"`
-}
-
-func parseEventSource(eventSource string) (interface{}, error) {
- var a *amqp
- err := yaml.Unmarshal([]byte(eventSource), &a)
- if err != nil {
- return nil, err
- }
- return a, nil
-}
-
-// Validate validates amqp
-func (a *amqp) Validate() error {
- if (a.RateLimit == 0) != (a.RatePeriod == 0) {
- return errors.New("RateLimit and RatePeriod must be either set or omitted")
- }
- return nil
-}
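`Validate` ties `RateLimit` and `RatePeriod` together with an exclusive-or on their zero values: setting one without the other is rejected, while setting both or neither passes. A tiny sketch of that check in isolation:

```go
package main

import (
	"errors"
	"fmt"
)

// validateRate mirrors the removed check: the two fields are only meaningful
// together, so exactly one of them being zero is an error.
func validateRate(rateLimit, ratePeriod uint32) error {
	if (rateLimit == 0) != (ratePeriod == 0) {
		return errors.New("RateLimit and RatePeriod must be either set or omitted")
	}
	return nil
}

func main() {
	fmt.Println(validateRate(10, 0)) // error: only one of the pair is set
	fmt.Println(validateRate(10, 5)) // nil: both set
	fmt.Println(validateRate(0, 0))  // nil: both omitted
}
```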
diff --git a/gateways/core/stream/amqp/config_test.go b/gateways/core/stream/amqp/config_test.go
deleted file mode 100644
index 890604fd75..0000000000
--- a/gateways/core/stream/amqp/config_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package amqp
-
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-var es = `
-url: amqp://amqp.argo-events:5672
-exchangeName: fooExchangeName
-exchangeType: fanout
-routingKey: fooRoutingKey
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a amqp event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*amqp)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/core/stream/amqp/start.go b/gateways/core/stream/amqp/start.go
deleted file mode 100644
index ce383e84aa..0000000000
--- a/gateways/core/stream/amqp/start.go
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package amqp
-
-import (
- "fmt"
- "time"
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- amqplib "github.com/streadway/amqp"
- "k8s.io/apimachinery/pkg/util/wait"
-)
-
-// StartEventSource starts an event source
-func (ese *AMQPEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- log.Info("operating on event source")
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
-
- go ese.listenEvents(config.(*amqp), eventSource, dataCh, errorCh, doneCh)
-
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, ese.Log)
-}
-
-func getLimitedDelivery(ch *amqplib.Channel, a *amqp, delivery chan amqplib.Delivery, queue string) {
- period := time.Duration(a.RatePeriod) * time.Second
- for {
- startTime := time.Now()
-
- for i := uint32(0); i < a.RateLimit; i++ {
- msg, ok, err := ch.Get(queue, true)
-
- if err != nil || ok == false {
- break
- }
- delivery <- msg
-
- if time.Now().After(startTime.Add(period)) {
- startTime = time.Now()
- i = 0
- }
- }
-
- remainingTime := startTime.Add(period).Sub(time.Now())
- time.Sleep(remainingTime)
- }
-}
-
-func getDelivery(ch *amqplib.Channel, a *amqp) (<-chan amqplib.Delivery, error) {
- err := ch.ExchangeDeclare(a.ExchangeName, a.ExchangeType, true, false, false, false, nil)
- if err != nil {
- return nil, fmt.Errorf("failed to declare exchange with name %s and type %s. err: %+v", a.ExchangeName, a.ExchangeType, err)
- }
-
- q, err := ch.QueueDeclare("", false, false, true, false, nil)
- if err != nil {
- return nil, fmt.Errorf("failed to declare queue: %s", err)
- }
-
- err = ch.QueueBind(q.Name, a.RoutingKey, a.ExchangeName, false, nil)
- if err != nil {
- return nil, fmt.Errorf("failed to bind %s exchange '%s' to queue with routingKey: %s: %s", a.ExchangeType, a.ExchangeName, a.RoutingKey, err)
- }
-
- if a.RateLimit != 0 {
- delivery := make(chan amqplib.Delivery)
- go getLimitedDelivery(ch, a, delivery, q.Name)
- return delivery, nil
- }
-
- delivery, err := ch.Consume(q.Name, "", true, false, false, false, nil)
- if err != nil {
- return nil, fmt.Errorf("failed to begin consuming messages: %s", err)
- }
- return delivery, nil
-}
-
-func (ese *AMQPEventSourceExecutor) listenEvents(a *amqp, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- defer gateways.Recover(eventSource.Name)
-
- if err := gateways.Connect(&wait.Backoff{
- Steps: a.Backoff.Steps,
- Factor: a.Backoff.Factor,
- Duration: a.Backoff.Duration,
- Jitter: a.Backoff.Jitter,
- }, func() error {
- var err error
- a.conn, err = amqplib.Dial(a.URL)
- if err != nil {
- return err
- }
- return nil
- }); err != nil {
- errorCh <- err
- return
- }
-
- ch, err := a.conn.Channel()
- if err != nil {
- errorCh <- err
- return
- }
-
- delivery, err := getDelivery(ch, a)
- if err != nil {
- errorCh <- err
- return
- }
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- log.Info("starting to subscribe to messages")
- for {
- select {
- case msg := <-delivery:
- dataCh <- msg.Body
- case <-doneCh:
- err = a.conn.Close()
- if err != nil {
- log.WithError(err).Info("failed to close connection")
- }
- return
- }
- }
-}
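The deleted `getDelivery` declares the exchange, declares an exclusive server-named queue, binds it with the routing key, and then either consumes directly or hands off to the rate-limited `Get` loop. Below is a minimal sketch of the unthrottled path with `github.com/streadway/amqp`; the broker URL, exchange, and routing key reuse the sample values from the deleted `config_test.go` and are illustrative only.

```go
package main

import (
	"log"

	amqplib "github.com/streadway/amqp"
)

func main() {
	conn, err := amqplib.Dial("amqp://amqp.argo-events:5672")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}

	// Declare the exchange, declare an exclusive server-named queue, and bind
	// the queue to the exchange with the routing key -- the same sequence the
	// removed getDelivery performed.
	if err := ch.ExchangeDeclare("fooExchangeName", "fanout", true, false, false, false, nil); err != nil {
		log.Fatal(err)
	}
	q, err := ch.QueueDeclare("", false, false, true, false, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := ch.QueueBind(q.Name, "fooRoutingKey", "fooExchangeName", false, nil); err != nil {
		log.Fatal(err)
	}

	deliveries, err := ch.Consume(q.Name, "", true, false, false, false, nil)
	if err != nil {
		log.Fatal(err)
	}
	for msg := range deliveries {
		log.Printf("received %d bytes", len(msg.Body))
	}
}
```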
diff --git a/gateways/core/stream/amqp/validate.go b/gateways/core/stream/amqp/validate.go
deleted file mode 100644
index a1d018ad2e..0000000000
--- a/gateways/core/stream/amqp/validate.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package amqp
-
-import (
- "context"
- "fmt"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *AMQPEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateAMQP)
-}
-
-func validateAMQP(config interface{}) error {
- a := config.(*amqp)
- if a == nil {
- return gwcommon.ErrNilEventSource
- }
- if a.URL == "" {
- return fmt.Errorf("url must be specified")
- }
- if a.RoutingKey == "" {
- return fmt.Errorf("routing key must be specified")
- }
- if a.ExchangeName == "" {
- return fmt.Errorf("exchange name must be specified")
- }
- if a.ExchangeType == "" {
- return fmt.Errorf("exchange type must be specified")
- }
- return nil
-}
diff --git a/gateways/core/stream/amqp/validate_test.go b/gateways/core/stream/amqp/validate_test.go
deleted file mode 100644
index f8e91b8d90..0000000000
--- a/gateways/core/stream/amqp/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package amqp
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateAMQPEventSource(t *testing.T) {
- convey.Convey("Given a amqp event source spec, parse it and make sure no error occurs", t, func() {
- ese := &AMQPEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("../%s/%s", gwcommon.EventSourceDir, "amqp.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/core/stream/kafka/config.go b/gateways/core/stream/kafka/config.go
deleted file mode 100644
index 4ed5a1006c..0000000000
--- a/gateways/core/stream/kafka/config.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "github.com/Shopify/sarama"
- "github.com/argoproj/argo-events/common"
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// KafkaEventSourceExecutor implements Eventing
-type KafkaEventSourceExecutor struct {
- Log *logrus.Logger
-}
-
-// kafka defines configuration required to connect to kafka cluster
-type kafka struct {
- // URL to kafka cluster
- URL string `json:"url"`
- // Partition name
- Partition string `json:"partition"`
- // Topic name
- Topic string `json:"topic"`
- // Backoff holds parameters applied to connection.
- Backoff *common.Backoff `json:"backoff,omitempty"`
- // Consumer manages PartitionConsumers which process Kafka messages from brokers.
- consumer sarama.Consumer
-}
-
-func parseEventSource(eventSource string) (interface{}, error) {
- var n *kafka
- err := yaml.Unmarshal([]byte(eventSource), &n)
- if err != nil {
- return nil, err
- }
- return n, nil
-}
diff --git a/gateways/core/stream/kafka/config_test.go b/gateways/core/stream/kafka/config_test.go
deleted file mode 100644
index c8d6a26a43..0000000000
--- a/gateways/core/stream/kafka/config_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-var es = `
-url: kafka.argo-events:9092
-topic: foo
-partition: "0"
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a kafka event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*kafka)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/core/stream/kafka/start.go b/gateways/core/stream/kafka/start.go
deleted file mode 100644
index 2a678e0540..0000000000
--- a/gateways/core/stream/kafka/start.go
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "fmt"
- "github.com/Shopify/sarama"
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "k8s.io/apimachinery/pkg/util/wait"
- "strconv"
-)
-
-func verifyPartitionAvailable(part int32, partitions []int32) bool {
- for _, p := range partitions {
- if part == p {
- return true
- }
- }
- return false
-}
-
-// StartEventSource starts an event source
-func (ese *KafkaEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- log.Info("operating on event source")
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
-
- go ese.listenEvents(config.(*kafka), eventSource, dataCh, errorCh, doneCh)
-
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, ese.Log)
-}
-
-func (ese *KafkaEventSourceExecutor) listenEvents(k *kafka, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- if err := gateways.Connect(&wait.Backoff{
- Steps: k.Backoff.Steps,
- Jitter: k.Backoff.Jitter,
- Duration: k.Backoff.Duration,
- Factor: k.Backoff.Factor,
- }, func() error {
- var err error
- k.consumer, err = sarama.NewConsumer([]string{k.URL}, nil)
- if err != nil {
- return err
- }
- return nil
- }); err != nil {
- log.WithError(err).WithField(common.LabelURL, k.URL).Error("failed to connect")
- errorCh <- err
- return
- }
-
- pInt, err := strconv.ParseInt(k.Partition, 10, 32)
- if err != nil {
- errorCh <- err
- return
- }
- partition := int32(pInt)
-
- availablePartitions, err := k.consumer.Partitions(k.Topic)
- if err != nil {
- errorCh <- err
- return
- }
- if ok := verifyPartitionAvailable(partition, availablePartitions); !ok {
- errorCh <- fmt.Errorf("partition %d is not available", partition)
- return
- }
-
- partitionConsumer, err := k.consumer.ConsumePartition(k.Topic, partition, sarama.OffsetNewest)
- if err != nil {
- errorCh <- err
- return
- }
-
- log.Info("starting to subscribe to messages")
- for {
- select {
- case msg := <-partitionConsumer.Messages():
- dataCh <- msg.Value
-
- case err := <-partitionConsumer.Errors():
- errorCh <- err
- return
-
- case <-doneCh:
- err = partitionConsumer.Close()
- if err != nil {
- log.WithError(err).Error("failed to close consumer")
- }
- return
- }
- }
-}
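The deleted Kafka gateway connects a Sarama consumer, checks that the configured partition actually exists, and then consumes that partition from the newest offset. A compact sketch of the same sequence follows; the broker, topic, and partition reuse the deleted `config_test.go` sample and are not a live cluster.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"kafka.argo-events:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	topic, partition := "foo", int32(0)

	// Verify the partition exists before consuming, as the removed
	// verifyPartitionAvailable helper did.
	partitions, err := consumer.Partitions(topic)
	if err != nil {
		log.Fatal(err)
	}
	available := false
	for _, p := range partitions {
		if p == partition {
			available = true
			break
		}
	}
	if !available {
		log.Fatalf("partition %d is not available", partition)
	}

	pc, err := consumer.ConsumePartition(topic, partition, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	for {
		select {
		case msg := <-pc.Messages():
			log.Printf("received %d bytes", len(msg.Value))
		case err := <-pc.Errors():
			log.Fatal(err)
		}
	}
}
```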
diff --git a/gateways/core/stream/kafka/validate.go b/gateways/core/stream/kafka/validate.go
deleted file mode 100644
index 2cd8c138f1..0000000000
--- a/gateways/core/stream/kafka/validate.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "context"
- "fmt"
-
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates the gateway event source
-func (ese *KafkaEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateKafka)
-}
-
-func validateKafka(config interface{}) error {
- k := config.(*kafka)
- if k == nil {
- return gwcommon.ErrNilEventSource
- }
- if k.URL == "" {
- return fmt.Errorf("url must be specified")
- }
- if k.Topic == "" {
- return fmt.Errorf("topic must be specified")
- }
- if k.Partition == "" {
- return fmt.Errorf("partition must be specified")
- }
- return nil
-}
diff --git a/gateways/core/stream/kafka/validate_test.go b/gateways/core/stream/kafka/validate_test.go
deleted file mode 100644
index b3678f73fa..0000000000
--- a/gateways/core/stream/kafka/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kafka
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateKafkaEventSource(t *testing.T) {
- convey.Convey("Given a kafka event source spec, parse it and make sure no error occurs", t, func() {
- ese := &KafkaEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("../%s/%s", gwcommon.EventSourceDir, "kafka.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/core/stream/mqtt/config.go b/gateways/core/stream/mqtt/config.go
deleted file mode 100644
index 9528ee31bd..0000000000
--- a/gateways/core/stream/mqtt/config.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package mqtt
-
-import (
- "github.com/argoproj/argo-events/common"
- mqttlib "github.com/eclipse/paho.mqtt.golang"
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// MqttEventSourceExecutor implements Eventing
-type MqttEventSourceExecutor struct {
- Log *logrus.Logger
-}
-
-// mqtt contains information to connect to MQTT broker
-type mqtt struct {
- // URL to connect to broker
- URL string `json:"url"`
- // Topic name
- Topic string `json:"topic"`
- // Client ID
- ClientId string `json:"clientId"`
- // Backoff holds parameters applied to connection.
- Backoff *common.Backoff `json:"backoff,omitempty"`
- // It is an MQTT client for communicating with an MQTT server
- client mqttlib.Client
-}
-
-func parseEventSource(eventSource string) (interface{}, error) {
- var m *mqtt
- err := yaml.Unmarshal([]byte(eventSource), &m)
- if err != nil {
- return nil, err
- }
- return m, nil
-}
diff --git a/gateways/core/stream/mqtt/start.go b/gateways/core/stream/mqtt/start.go
deleted file mode 100644
index eec63dacf6..0000000000
--- a/gateways/core/stream/mqtt/start.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package mqtt
-
-import (
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- mqttlib "github.com/eclipse/paho.mqtt.golang"
- "k8s.io/apimachinery/pkg/util/wait"
-)
-
-// StartEventSource starts an event source
-func (ese *MqttEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- log.Info("operating on event source")
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
-
- go ese.listenEvents(config.(*mqtt), eventSource, dataCh, errorCh, doneCh)
-
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, ese.Log)
-}
-
-func (ese *MqttEventSourceExecutor) listenEvents(m *mqtt, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithFields(
- map[string]interface{}{
- common.LabelEventSource: eventSource.Name,
- common.LabelURL: m.URL,
- common.LabelClientID: m.ClientId,
- },
- )
-
- handler := func(c mqttlib.Client, msg mqttlib.Message) {
- dataCh <- msg.Payload()
- }
- opts := mqttlib.NewClientOptions().AddBroker(m.URL).SetClientID(m.ClientId)
-
- if err := gateways.Connect(&wait.Backoff{
- Factor: m.Backoff.Factor,
- Duration: m.Backoff.Duration,
- Jitter: m.Backoff.Jitter,
- Steps: m.Backoff.Steps,
- }, func() error {
- client := mqttlib.NewClient(opts)
- if token := client.Connect(); token.Wait() && token.Error() != nil {
- return token.Error()
- }
- return nil
- }); err != nil {
- log.Info("failed to connect")
- errorCh <- err
- return
- }
-
- log.Info("subscribing to topic")
- if token := m.client.Subscribe(m.Topic, 0, handler); token.Wait() && token.Error() != nil {
- log.WithError(token.Error()).Error("failed to subscribe")
- errorCh <- token.Error()
- return
- }
-
- <-doneCh
- token := m.client.Unsubscribe(m.Topic)
- if token.Error() != nil {
- log.WithError(token.Error()).Error("failed to unsubscribe client")
- }
-}
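One thing worth noting in the deleted `listenEvents` above: the reconnect closure builds a local `client := mqttlib.NewClient(opts)` but never assigns it to `m.client`, which is what the later `Subscribe` and `Unsubscribe` calls use, so as written the subscribe step would act on a nil client. The sketch below keeps a single client variable for connect and subscribe; the broker URL, client ID, and topic are illustrative.

```go
package main

import (
	"log"

	mqttlib "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqttlib.NewClientOptions().
		AddBroker("tcp://mqtt.argo-events:1883").
		SetClientID("example-gateway")

	// Keep the connected client in one variable so the same instance that
	// connected is the one that subscribes.
	client := mqttlib.NewClient(opts)
	if token := client.Connect(); token.Wait() && token.Error() != nil {
		log.Fatal(token.Error())
	}
	defer client.Disconnect(250)

	handler := func(_ mqttlib.Client, msg mqttlib.Message) {
		log.Printf("received %d bytes on %s", len(msg.Payload()), msg.Topic())
	}
	if token := client.Subscribe("foo", 0, handler); token.Wait() && token.Error() != nil {
		log.Fatal(token.Error())
	}

	select {} // block forever; the gateway version waited on its done channel
}
```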
diff --git a/gateways/core/stream/mqtt/validate.go b/gateways/core/stream/mqtt/validate.go
deleted file mode 100644
index f3a3a4d8b5..0000000000
--- a/gateways/core/stream/mqtt/validate.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package mqtt
-
-import (
- "context"
- "fmt"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *MqttEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateMQTT)
-}
-
-func validateMQTT(config interface{}) error {
- m := config.(*mqtt)
- if m == nil {
- return gwcommon.ErrNilEventSource
- }
- if m.URL == "" {
- return fmt.Errorf("url must be specified")
- }
- if m.Topic == "" {
- return fmt.Errorf("topic must be specified")
- }
- if m.ClientId == "" {
- return fmt.Errorf("client id must be specified")
- }
- return nil
-}
diff --git a/gateways/core/stream/mqtt/validate_test.go b/gateways/core/stream/mqtt/validate_test.go
deleted file mode 100644
index d700b9d40b..0000000000
--- a/gateways/core/stream/mqtt/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package mqtt
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateMqttEventSource(t *testing.T) {
- convey.Convey("Given a mqtt event source spec, parse it and make sure no error occurs", t, func() {
- ese := &MqttEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("../%s/%s", gwcommon.EventSourceDir, "mqtt.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/core/stream/nats/config.go b/gateways/core/stream/nats/config.go
deleted file mode 100644
index cc710ffc10..0000000000
--- a/gateways/core/stream/nats/config.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nats
-
-import (
- "github.com/argoproj/argo-events/common"
- "github.com/ghodss/yaml"
- natslib "github.com/nats-io/go-nats"
- "github.com/sirupsen/logrus"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// NatsEventSourceExecutor implements Eventing
-type NatsEventSourceExecutor struct {
- Log *logrus.Logger
-}
-
-// Nats contains configuration to connect to NATS cluster
-type natsConfig struct {
- // URL to connect to natsConfig cluster
- URL string `json:"url"`
- // Subject name
- Subject string `json:"subject"`
- // Backoff holds parameters applied to connection.
- Backoff *common.Backoff `json:"backoff,omitempty"`
- // conn represents a bare connection to a nats-server.
- conn *natslib.Conn
-}
-
-func parseEventSource(es string) (interface{}, error) {
- var n *natsConfig
- err := yaml.Unmarshal([]byte(es), &n)
- if err != nil {
- return nil, err
- }
- return n, nil
-}
diff --git a/gateways/core/stream/nats/config_test.go b/gateways/core/stream/nats/config_test.go
deleted file mode 100644
index 907edb4adf..0000000000
--- a/gateways/core/stream/nats/config_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nats
-
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-var es = `
-url: natsConfig://natsConfig.argo-events:4222
-subject: foo
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a nats event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*natsConfig)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
diff --git a/gateways/core/stream/nats/start.go b/gateways/core/stream/nats/start.go
deleted file mode 100644
index 4f6de230d4..0000000000
--- a/gateways/core/stream/nats/start.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nats
-
-import (
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- natslib "github.com/nats-io/go-nats"
- "k8s.io/apimachinery/pkg/util/wait"
-)
-
-// StartEventSource starts an event source
-func (ese *NatsEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
- log.Info("operating on event source")
-
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
-
- dataCh := make(chan []byte)
- errorCh := make(chan error)
- doneCh := make(chan struct{}, 1)
-
- go ese.listenEvents(config.(*natsConfig), eventSource, dataCh, errorCh, doneCh)
-
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, ese.Log)
-}
-
-func (ese *NatsEventSourceExecutor) listenEvents(n *natsConfig, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithFields(
- map[string]interface{}{
- common.LabelEventSource: eventSource.Name,
- common.LabelURL: n.URL,
- "subject": n.Subject,
- },
- )
-
- if err := gateways.Connect(&wait.Backoff{
- Steps: n.Backoff.Steps,
- Jitter: n.Backoff.Jitter,
- Duration: n.Backoff.Duration,
- Factor: n.Backoff.Factor,
- }, func() error {
- var err error
- if n.conn, err = natslib.Connect(n.URL); err != nil {
- return err
- }
- return nil
- }); err != nil {
- log.WithError(err).Error("connection failed")
- errorCh <- err
- return
- }
-
- log.Info("subscribing to messages")
- _, err := n.conn.Subscribe(n.Subject, func(msg *natslib.Msg) {
- dataCh <- msg.Data
- })
- if err != nil {
- log.WithError(err).Error("failed to subscribe")
- errorCh <- err
- return
- }
- n.conn.Flush()
- if err := n.conn.LastError(); err != nil {
- errorCh <- err
- return
- }
-
- <-doneCh
-}
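
For reference, the subscribe-and-forward pattern the deleted NATS listener used can be reduced to a small standalone sketch against the go-nats client; the gateway plumbing (Connect backoff, dataCh/errorCh/doneCh) is left out, and the URL and subject below are placeholders.

    package main

    import (
        "log"

        natslib "github.com/nats-io/go-nats"
    )

    func main() {
        // Connect to a NATS server (placeholder URL).
        conn, err := natslib.Connect("nats://localhost:4222")
        if err != nil {
            log.Fatalf("connection failed: %v", err)
        }
        defer conn.Close()

        data := make(chan []byte)

        // Subscribe and forward each message payload, as the deleted listener did.
        if _, err := conn.Subscribe("foo", func(msg *natslib.Msg) {
            data <- msg.Data
        }); err != nil {
            log.Fatalf("failed to subscribe: %v", err)
        }

        // Flush makes sure the subscription reached the server before waiting for messages.
        if err := conn.Flush(); err != nil {
            log.Fatalf("flush failed: %v", err)
        }

        for payload := range data {
            log.Printf("received: %s", payload)
        }
    }
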
diff --git a/gateways/core/stream/nats/validate.go b/gateways/core/stream/nats/validate.go
deleted file mode 100644
index 395ccb87b7..0000000000
--- a/gateways/core/stream/nats/validate.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nats
-
-import (
- "context"
- "fmt"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
-)
-
-// ValidateEventSource validates gateway event source
-func (ese *NatsEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateNATS)
-}
-
-func validateNATS(config interface{}) error {
- n := config.(*natsConfig)
- if n == nil {
- return fmt.Errorf("configuration must be non empty")
- }
- if n.URL == "" {
- return fmt.Errorf("url must be specified")
- }
- if n.Subject == "" {
- return fmt.Errorf("subject must be specified")
- }
- return nil
-}
diff --git a/gateways/core/stream/nats/validate_test.go b/gateways/core/stream/nats/validate_test.go
deleted file mode 100644
index 951294a5c5..0000000000
--- a/gateways/core/stream/nats/validate_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nats
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateNatsEventSource(t *testing.T) {
- convey.Convey("Given a nats event source spec, parse it and make sure no error occurs", t, func() {
- ese := &NatsEventSourceExecutor{}
- content, err := ioutil.ReadFile(fmt.Sprintf("../%s/%s", gwcommon.EventSourceDir, "nats.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
diff --git a/gateways/core/webhook/cmd/main.go b/gateways/core/webhook/cmd/main.go
deleted file mode 100644
index 6941eaf043..0000000000
--- a/gateways/core/webhook/cmd/main.go
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/core/webhook"
-)
-
-func main() {
- gateways.StartGateway(&webhook.WebhookEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- })
-}
diff --git a/gateways/core/webhook/config.go b/gateways/core/webhook/config.go
deleted file mode 100644
index ae37d02620..0000000000
--- a/gateways/core/webhook/config.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package webhook
-
-import (
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
-)
-
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// WebhookEventSourceExecutor implements Eventing
-type WebhookEventSourceExecutor struct {
- Log *logrus.Logger
-}
-
-type RouteConfig struct {
- Route *gwcommon.Route
-}
-
-func parseEventSource(es string) (interface{}, error) {
- var n *gwcommon.Webhook
- err := yaml.Unmarshal([]byte(es), &n)
- if err != nil {
- return nil, err
- }
- return n, nil
-}
diff --git a/gateways/core/webhook/start.go b/gateways/core/webhook/start.go
deleted file mode 100644
index 615f19ac96..0000000000
--- a/gateways/core/webhook/start.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package webhook
-
-import (
- "fmt"
- "github.com/argoproj/argo-events/common"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "io/ioutil"
- "net/http"
-
- "github.com/argoproj/argo-events/gateways"
-)
-
-var (
- helper = gwcommon.NewWebhookHelper()
-)
-
-func init() {
- go gwcommon.InitRouteChannels(helper)
-}
-
-func (rc *RouteConfig) GetRoute() *gwcommon.Route {
- return rc.Route
-}
-
-// RouteHandler handles new route
-func (rc *RouteConfig) RouteHandler(writer http.ResponseWriter, request *http.Request) {
- var response string
-
- r := rc.Route
-
- log := r.Logger.WithFields(
- map[string]interface{}{
- common.LabelEventSource: r.EventSource.Name,
- common.LabelEndpoint: r.Webhook.Endpoint,
- common.LabelPort: r.Webhook.Port,
- common.LabelHTTPMethod: r.Webhook.Method,
- })
-
- log.Info("request received")
-
- if !helper.ActiveEndpoints[r.Webhook.Endpoint].Active {
-	response = fmt.Sprintf("the route: endpoint %s and method %s is deactivated", r.Webhook.Endpoint, r.Webhook.Method)
- log.Info("endpoint is not active")
- common.SendErrorResponse(writer, response)
- return
- }
-
- if r.Webhook.Method != request.Method {
- log.WithFields(
- map[string]interface{}{
- "expected": r.Webhook.Method,
- "actual": request.Method,
- },
- ).Warn("method mismatch")
-
- common.SendErrorResponse(writer, fmt.Sprintf("the method %s is not defined for endpoint %s", r.Webhook.Method, r.Webhook.Endpoint))
- return
- }
-
- body, err := ioutil.ReadAll(request.Body)
- if err != nil {
- log.WithError(err).Error("failed to parse request body")
- common.SendErrorResponse(writer, fmt.Sprintf("failed to parse request. err: %+v", err))
- return
- }
-
- helper.ActiveEndpoints[r.Webhook.Endpoint].DataCh <- body
- response = "request successfully processed"
- log.Info(response)
- common.SendSuccessResponse(writer, response)
-}
-
-func (rc *RouteConfig) PostStart() error {
- return nil
-}
-
-func (rc *RouteConfig) PostStop() error {
- return nil
-}
-
-// StartEventSource starts an event source
-func (ese *WebhookEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- defer gateways.Recover(eventSource.Name)
-
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
-
- log.Info("operating on event source")
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
- h := config.(*gwcommon.Webhook)
- h.Endpoint = gwcommon.FormatWebhookEndpoint(h.Endpoint)
-
- return gwcommon.ProcessRoute(&RouteConfig{
- Route: &gwcommon.Route{
- Logger: ese.Log,
- EventSource: eventSource,
- StartCh: make(chan struct{}),
- Webhook: h,
- },
- }, helper, eventStream)
-}
diff --git a/gateways/core/webhook/start_test.go b/gateways/core/webhook/start_test.go
deleted file mode 100644
index 3c1025b692..0000000000
--- a/gateways/core/webhook/start_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package webhook
-
-import (
- "bytes"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/smartystreets/goconvey/convey"
- "io/ioutil"
- "net/http"
- "testing"
-)
-
-func TestRouteActiveHandler(t *testing.T) {
- convey.Convey("Given a route configuration", t, func() {
- rc := &RouteConfig{
- Route: gwcommon.GetFakeRoute(),
- }
- r := rc.Route
- r.Webhook.Method = http.MethodGet
- helper.ActiveEndpoints[r.Webhook.Endpoint] = &gwcommon.Endpoint{
- DataCh: make(chan []byte),
- }
-
- writer := &gwcommon.FakeHttpWriter{}
-
- convey.Convey("Inactive route should return error", func() {
- rc.RouteHandler(writer, &http.Request{
- Body: ioutil.NopCloser(bytes.NewReader([]byte("hello"))),
- })
- convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest)
- })
-
- helper.ActiveEndpoints[r.Webhook.Endpoint].Active = true
-
- convey.Convey("Active route with correct method should return success", func() {
- dataCh := make(chan []byte)
- go func() {
- resp := <-helper.ActiveEndpoints[r.Webhook.Endpoint].DataCh
- dataCh <- resp
- }()
-
- rc.RouteHandler(writer, &http.Request{
- Body: ioutil.NopCloser(bytes.NewReader([]byte("fake notification"))),
- Method: http.MethodGet,
- })
- convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusOK)
- data := <-dataCh
- convey.So(string(data), convey.ShouldEqual, "fake notification")
- })
-
- convey.Convey("Active route with incorrect method should return failure", func() {
- rc.RouteHandler(writer, &http.Request{
- Body: ioutil.NopCloser(bytes.NewReader([]byte("fake notification"))),
- Method: http.MethodHead,
- })
- convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest)
- })
- })
-}
diff --git a/gateways/core/webhook/validate.go b/gateways/core/webhook/validate.go
deleted file mode 100644
index 65bc6eb5f1..0000000000
--- a/gateways/core/webhook/validate.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package webhook
-
-import (
- "context"
- "fmt"
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "net/http"
-)
-
-// ValidateEventSource validates webhook event source
-func (ese *WebhookEventSourceExecutor) ValidateEventSource(ctx context.Context, es *gateways.EventSource) (*gateways.ValidEventSource, error) {
- ese.Log.WithFields(
- map[string]interface{}{
- common.LabelEventSource: es.Name,
- common.LabelVersion: es.Version,
- }).Info("validating event source")
- return gwcommon.ValidateGatewayEventSource(es, ArgoEventsEventSourceVersion, parseEventSource, validateWebhook)
-}
-
-func validateWebhook(config interface{}) error {
- w := config.(*gwcommon.Webhook)
- if w == nil {
- return gwcommon.ErrNilEventSource
- }
-
- switch w.Method {
- case http.MethodHead, http.MethodPut, http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodTrace:
- default:
- return fmt.Errorf("unknown HTTP method %s", w.Method)
- }
-
- return gwcommon.ValidateWebhook(w)
-}
diff --git a/gateways/core/webhook/validate_test.go b/gateways/core/webhook/validate_test.go
deleted file mode 100644
index e79c000bb4..0000000000
--- a/gateways/core/webhook/validate_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package webhook
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
-)
-
-func TestValidateEventSource(t *testing.T) {
- convey.Convey("Given a valid webhook event source spec, parse it and make sure no error occurs", t, func() {
- ese := &WebhookEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- }
- content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gwcommon.EventSourceDir, "webhook.yaml"))
- convey.So(err, convey.ShouldBeNil)
-
- var cm *corev1.ConfigMap
- err = yaml.Unmarshal(content, &cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(cm, convey.ShouldNotBeNil)
-
- err = common.CheckEventSourceVersion(cm)
- convey.So(err, convey.ShouldBeNil)
-
- for key, value := range cm.Data {
- valid, _ := ese.ValidateEventSource(context.Background(), &gateways.EventSource{
- Name: key,
- Id: common.Hasher(key),
- Data: value,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- })
- convey.So(valid, convey.ShouldNotBeNil)
- convey.So(valid.IsValid, convey.ShouldBeTrue)
- }
- })
-}
-
-func TestValidate(t *testing.T) {
- convey.Convey("Given a webhook, validate it", t, func() {
- w := &gwcommon.Webhook{
- Port: "12000",
- Endpoint: "/",
- Method: "POST",
- }
- err := validateWebhook(w)
- convey.So(err, convey.ShouldBeNil)
- })
-}
diff --git a/gateways/event-source_test.go b/gateways/event-source_test.go
deleted file mode 100644
index 688c420d5e..0000000000
--- a/gateways/event-source_test.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gateways
-
-import (
- "context"
- "fmt"
- "os"
- "sync"
- "testing"
-
- "github.com/argoproj/argo-events/common"
- pc "github.com/argoproj/argo-events/pkg/apis/common"
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- gwfake "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned/fake"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/kubernetes/fake"
-)
-
-func getGatewayConfig() *GatewayConfig {
- return &GatewayConfig{
- Log: common.NewArgoEventsLogger(),
- serverPort: "1234",
- StatusCh: make(chan EventSourceStatus),
- gw: &v1alpha1.Gateway{
- ObjectMeta: metav1.ObjectMeta{
-				Name:      "test-gateway",
- Namespace: "test-nm",
- },
- Spec: v1alpha1.GatewaySpec{
- Watchers: &v1alpha1.NotificationWatchers{
- Sensors: []v1alpha1.SensorNotificationWatcher{},
- },
- EventProtocol: &pc.EventProtocol{
- Type: pc.HTTP,
- Http: pc.Http{
- Port: "9000",
- },
- },
- },
- },
- Clientset: fake.NewSimpleClientset(),
- gwcs: gwfake.NewSimpleClientset(),
- }
-}
-
-type testEventSourceExecutor struct{}
-
-func (ese *testEventSourceExecutor) StartEventSource(eventSource *EventSource, eventStream Eventing_StartEventSourceServer) error {
- defer func() {
- if r := recover(); r != nil {
- fmt.Println(r)
- }
- }()
- _ = eventStream.Send(&Event{
- Name: eventSource.Name,
- Payload: []byte("test payload"),
- })
-
- <-eventStream.Context().Done()
-
- return nil
-}
-
-func (ese *testEventSourceExecutor) ValidateEventSource(ctx context.Context, eventSource *EventSource) (*ValidEventSource, error) {
- return &ValidEventSource{
- IsValid: true,
- }, nil
-}
-
-func TestEventSources(t *testing.T) {
- _ = os.Setenv(common.EnvVarGatewayServerPort, "1234")
- go StartGateway(&testEventSourceExecutor{})
- gc := getGatewayConfig()
-
- var eventSrcCtxMap map[string]*EventSourceContext
- var eventSourceKeys []string
-
- convey.Convey("Given a gateway configmap, create event sources", t, func() {
- cm := &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: "gateway-configmap",
- Namespace: "test-namespace",
- Labels: map[string]string{
- common.LabelArgoEventsEventSourceVersion: "v0.11",
- },
- },
- Data: map[string]string{
- "event-source-1": `
-testKey: testValue
-`,
- },
- }
- fakeclientset := fake.NewSimpleClientset()
- _, err := fakeclientset.CoreV1().ConfigMaps(cm.Namespace).Create(cm)
- convey.So(err, convey.ShouldBeNil)
-
- eventSrcCtxMap, err = gc.createInternalEventSources(cm)
- convey.So(err, convey.ShouldBeNil)
- convey.So(eventSrcCtxMap, convey.ShouldNotBeNil)
- convey.So(len(eventSrcCtxMap), convey.ShouldEqual, 1)
- for _, data := range eventSrcCtxMap {
- convey.So(data.Source.Data, convey.ShouldEqual, `
-testKey: testValue
-`)
- convey.So(data.Source.Version, convey.ShouldEqual, "v0.11")
- }
- })
-
- convey.Convey("Given old and new event sources, return diff", t, func() {
- gc.registeredConfigs = make(map[string]*EventSourceContext)
- staleEventSources, newEventSources := gc.diffEventSources(eventSrcCtxMap)
- convey.So(staleEventSources, convey.ShouldBeEmpty)
- convey.So(newEventSources, convey.ShouldNotBeEmpty)
- convey.So(len(newEventSources), convey.ShouldEqual, 1)
- eventSourceKeys = newEventSources
- })
-
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- i := 0
- for event := range gc.StatusCh {
- switch event.Phase {
- case v1alpha1.NodePhaseRunning:
- convey.Convey("Event source is running", t, func() {
- convey.So(i, convey.ShouldEqual, 0)
- convey.So(event.Message, convey.ShouldEqual, "event_source_is_running")
- i++
- go gc.stopEventSources(eventSourceKeys)
- })
- case v1alpha1.NodePhaseError:
- convey.Convey("Event source is in error", t, func() {
- convey.So(i, convey.ShouldNotEqual, 0)
- convey.So(event.Message, convey.ShouldEqual, "failed_to_receive_event_from_event_source_stream")
- })
-
- case v1alpha1.NodePhaseRemove:
- convey.Convey("Event source should be removed", t, func() {
- convey.So(i, convey.ShouldNotEqual, 0)
- convey.So(event.Message, convey.ShouldEqual, "event_source_is_removed")
- })
- goto end
- }
- }
- end:
- wg.Done()
- }()
-
- convey.Convey("Given new event sources, start consuming events", t, func() {
- gc.startEventSources(eventSrcCtxMap, eventSourceKeys)
- wg.Wait()
- })
-}
diff --git a/gateways/event-sources.go b/gateways/event-sources.go
deleted file mode 100644
index 1842820287..0000000000
--- a/gateways/event-sources.go
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gateways
-
-import (
- "context"
- "fmt"
- "io"
- "time"
-
- "github.com/argoproj/argo-events/pkg/apis/gateway"
-
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- "google.golang.org/grpc"
- "google.golang.org/grpc/connectivity"
- corev1 "k8s.io/api/core/v1"
-)
-
-// createInternalEventSources creates an internal representation of the event sources declared in the gateway configmap.
-// The returned event sources are a map from the hash of an event source to the event source itself.
-// Hashing an event source makes it easy to check whether two event sources are equal.
-func (gc *GatewayConfig) createInternalEventSources(cm *corev1.ConfigMap) (map[string]*EventSourceContext, error) {
- configs := make(map[string]*EventSourceContext)
- for configKey, configValue := range cm.Data {
- hashKey := common.Hasher(configKey + configValue)
- gc.Log.WithFields(
- map[string]interface{}{
- "config-key": configKey,
- "config-value": configValue,
- "hash": string(hashKey),
- },
- ).Info("event source")
-
- // create a connection to gateway server
- ctx, cancel := context.WithCancel(context.Background())
- conn, err := grpc.Dial(
- fmt.Sprintf("localhost:%s", gc.serverPort),
- grpc.WithBlock(),
- grpc.WithInsecure(),
- grpc.WithTimeout(common.ServerConnTimeout*time.Second))
- if err != nil {
- gc.Log.WithError(err).Panic("failed to connect to gateway server")
- cancel()
- return nil, err
- }
-
- gc.Log.WithField("state", conn.GetState().String()).Info("state of the connection")
-
- configs[hashKey] = &EventSourceContext{
- Source: &EventSource{
- Id: hashKey,
- Name: configKey,
- Data: configValue,
- Version: cm.Labels[common.LabelArgoEventsEventSourceVersion],
- },
- Cancel: cancel,
- Ctx: ctx,
- Client: NewEventingClient(conn),
- Conn: conn,
- }
- }
- return configs, nil
-}
-
-// diffEventSources diffs the currently registered event sources against the event sources in the gateway configmap.
-// It simply compares the event source strings, so two event sources that are semantically identical but written
-// differently are treated as different event sources.
-// It returns staleConfigKeys - event sources to be removed from the gateway -
-// and newConfigKeys - new event sources to run.
-func (gc *GatewayConfig) diffEventSources(newConfigs map[string]*EventSourceContext) (staleConfigKeys []string, newConfigKeys []string) {
- var currentConfigKeys []string
- var updatedConfigKeys []string
-
- for currentConfigKey := range gc.registeredConfigs {
- currentConfigKeys = append(currentConfigKeys, currentConfigKey)
- }
- for updatedConfigKey := range newConfigs {
- updatedConfigKeys = append(updatedConfigKeys, updatedConfigKey)
- }
-
- gc.Log.WithField("current-event-sources-keys", currentConfigKeys).Debug("event sources hashes")
- gc.Log.WithField("updated-event-sources-keys", updatedConfigKeys).Debug("event sources hashes")
-
- swapped := false
-	// iterate over the current and the updated event sources and build two slices:
-	// the first contains event sources that need to be removed,
-	// the second contains new event sources that need to be added and run.
- for i := 0; i < 2; i++ {
- for _, cc := range currentConfigKeys {
- found := false
- for _, uc := range updatedConfigKeys {
- if cc == uc {
- found = true
- break
- }
- }
- if !found {
- if swapped {
- newConfigKeys = append(newConfigKeys, cc)
- } else {
- staleConfigKeys = append(staleConfigKeys, cc)
- }
- }
- }
- if i == 0 {
- currentConfigKeys, updatedConfigKeys = updatedConfigKeys, currentConfigKeys
- swapped = true
- }
- }
- return
-}
-
-// startEventSources starts new event sources added to gateway
-func (gc *GatewayConfig) startEventSources(eventSources map[string]*EventSourceContext, keys []string) {
- for _, key := range keys {
- eventSource := eventSources[key]
- // register the event source
- gc.registeredConfigs[key] = eventSource
-
- log := gc.Log.WithField(common.LabelEventSource, eventSource.Source.Name)
-
- log.Info("activating new event source")
-
- go func() {
- // conn should be in READY state
- if eventSource.Conn.GetState() != connectivity.Ready {
- gc.Log.Error("connection is not in ready state.")
- gc.StatusCh <- EventSourceStatus{
- Phase: v1alpha1.NodePhaseError,
- Id: eventSource.Source.Id,
- Message: "connection_is_not_in_ready_state",
- Name: eventSource.Source.Name,
- }
- return
- }
-
- // validate event source
- if valid, _ := eventSource.Client.ValidateEventSource(eventSource.Ctx, eventSource.Source); !valid.IsValid {
- gc.Log.WithFields(
- map[string]interface{}{
- "validation-failure": valid.Reason,
- },
- ).Error("event source is not valid")
- if err := eventSource.Conn.Close(); err != nil {
- gc.Log.WithError(err).Error("failed to close client connection")
- }
- gc.StatusCh <- EventSourceStatus{
- Phase: v1alpha1.NodePhaseError,
- Id: eventSource.Source.Id,
- Message: "event_source_is_not_valid",
- Name: eventSource.Source.Name,
- }
- return
- }
-
- gc.Log.Info("event source is valid")
-
- // mark event source as running
- gc.StatusCh <- EventSourceStatus{
- Phase: v1alpha1.NodePhaseRunning,
- Message: "event_source_is_running",
- Id: eventSource.Source.Id,
- Name: eventSource.Source.Name,
- }
-
- // listen to events from gateway server
- eventStream, err := eventSource.Client.StartEventSource(eventSource.Ctx, eventSource.Source)
- if err != nil {
- gc.Log.WithError(err).Error("error occurred while starting event source")
- gc.StatusCh <- EventSourceStatus{
- Phase: v1alpha1.NodePhaseError,
- Message: "failed_to_receive_event_stream",
- Name: eventSource.Source.Name,
- Id: eventSource.Source.Id,
- }
- return
- }
-
- gc.Log.Info("started listening to events from gateway server")
- for {
- event, err := eventStream.Recv()
- if err != nil {
- if err == io.EOF {
- gc.Log.Info("event source has stopped")
- gc.StatusCh <- EventSourceStatus{
- Phase: v1alpha1.NodePhaseCompleted,
- Message: "event_source_has_been_stopped",
- Name: eventSource.Source.Name,
- Id: eventSource.Source.Id,
- }
- return
- }
-
- gc.Log.WithError(err).Error("failed to receive event from stream")
- gc.StatusCh <- EventSourceStatus{
- Phase: v1alpha1.NodePhaseError,
- Message: "failed_to_receive_event_from_event_source_stream",
- Name: eventSource.Source.Name,
- Id: eventSource.Source.Id,
- }
- return
- }
- err = gc.DispatchEvent(event)
- if err != nil {
- // escalate error through a K8s event
- labels := map[string]string{
- common.LabelEventType: string(common.EscalationEventType),
- common.LabelGatewayEventSourceName: eventSource.Source.Name,
- common.LabelGatewayName: gc.Name,
- common.LabelGatewayEventSourceID: eventSource.Source.Id,
- common.LabelOperation: "dispatch_event_to_watchers",
- }
- if err := common.GenerateK8sEvent(gc.Clientset, fmt.Sprintf("failed to dispatch event to watchers"), common.EscalationEventType, "event dispatch failed", gc.Name, gc.Namespace, gc.controllerInstanceID, gateway.Kind, labels); err != nil {
- gc.Log.WithError(err).Error("failed to create K8s event to escalate event dispatch failure")
- }
- gc.Log.WithError(err).Error("failed to dispatch event to watchers")
- }
- }
- }()
- }
-}
-
-// stopEventSources stops the given event sources
-func (gc *GatewayConfig) stopEventSources(configs []string) {
- for _, configKey := range configs {
- eventSource := gc.registeredConfigs[configKey]
- delete(gc.registeredConfigs, configKey)
- gc.Log.WithField(common.LabelEventSource, eventSource.Source.Name).Info("removing the event source")
- gc.StatusCh <- EventSourceStatus{
- Phase: v1alpha1.NodePhaseRemove,
- Id: eventSource.Source.Id,
- Message: "event_source_is_removed",
- Name: eventSource.Source.Name,
- }
- eventSource.Cancel()
- if err := eventSource.Conn.Close(); err != nil {
- gc.Log.WithField(common.LabelEventSource, eventSource.Source.Name).WithError(err).Error("failed to close client connection")
- }
- }
-}
-
-// manageEventSources syncs the registered event sources with the updated gateway configmap
-func (gc *GatewayConfig) manageEventSources(cm *corev1.ConfigMap) error {
- eventSources, err := gc.createInternalEventSources(cm)
- if err != nil {
- return err
- }
-
- staleEventSources, newEventSources := gc.diffEventSources(eventSources)
- gc.Log.WithField(common.LabelEventSource, staleEventSources).Info("stale event sources")
- gc.Log.WithField(common.LabelEventSource, newEventSources).Info("new event sources")
-
- // stop existing event sources
- gc.stopEventSources(staleEventSources)
-
- // start new event sources
- gc.startEventSources(eventSources, newEventSources)
-
- return nil
-}
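
The two-pass comparison in the deleted diffEventSources can be seen in isolation with a minimal sketch (the names below are illustrative only): the first pass collects keys that are registered but no longer in the configmap, the slices are then swapped, and the second pass collects keys that are new.

    package main

    import "fmt"

    // diffKeys mirrors the swap-based comparison used by the deleted diffEventSources.
    func diffKeys(current, updated []string) (stale, fresh []string) {
        swapped := false
        for i := 0; i < 2; i++ {
            for _, c := range current {
                found := false
                for _, u := range updated {
                    if c == u {
                        found = true
                        break
                    }
                }
                if !found {
                    if swapped {
                        fresh = append(fresh, c)
                    } else {
                        stale = append(stale, c)
                    }
                }
            }
            if i == 0 {
                current, updated = updated, current
                swapped = true
            }
        }
        return stale, fresh
    }

    func main() {
        stale, fresh := diffKeys([]string{"hash-a", "hash-b"}, []string{"hash-b", "hash-c"})
        fmt.Println(stale) // [hash-a]: event sources to stop
        fmt.Println(fresh) // [hash-c]: event sources to start
    }
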
diff --git a/gateways/eventing.pb.go b/gateways/eventing.pb.go
index 8ce0f0f223..fb1e1e9dff 100644
--- a/gateways/eventing.pb.go
+++ b/gateways/eventing.pb.go
@@ -8,8 +8,6 @@ import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
math "math"
)
@@ -32,9 +30,9 @@ type EventSource struct {
// The event source name.
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// The event source configuration value.
- Data string `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
- // Version of the event source
- Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
+ Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+ // Type of the event source
+ Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -79,16 +77,16 @@ func (m *EventSource) GetName() string {
return ""
}
-func (m *EventSource) GetData() string {
+func (m *EventSource) GetValue() []byte {
if m != nil {
- return m.Data
+ return m.Value
}
- return ""
+ return nil
}
-func (m *EventSource) GetVersion() string {
+func (m *EventSource) GetType() string {
if m != nil {
- return m.Version
+ return m.Type
}
return ""
}
@@ -204,22 +202,23 @@ func init() {
func init() { proto.RegisterFile("eventing.proto", fileDescriptor_2abcc01b0da84106) }
var fileDescriptor_2abcc01b0da84106 = []byte{
- // 237 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xc1, 0x4a, 0xc3, 0x40,
- 0x10, 0x86, 0xd9, 0x58, 0x9b, 0x38, 0x4a, 0x2d, 0x23, 0xca, 0xd2, 0x93, 0xe4, 0xe4, 0x29, 0x88,
- 0xe2, 0xcd, 0xa3, 0x05, 0xcf, 0x29, 0x78, 0x95, 0xd1, 0x1d, 0xca, 0x42, 0xdd, 0x2d, 0x9b, 0xb5,
- 0xd2, 0xd7, 0xf0, 0x89, 0x25, 0x63, 0x56, 0x97, 0x9c, 0x7a, 0x9b, 0xff, 0x0b, 0x7c, 0xff, 0x4c,
- 0x16, 0x66, 0xbc, 0x63, 0x17, 0xad, 0x5b, 0x37, 0xdb, 0xe0, 0xa3, 0xc7, 0x6a, 0x4d, 0x91, 0xbf,
- 0x68, 0xdf, 0xd5, 0xaf, 0x70, 0xba, 0xec, 0xbf, 0xad, 0xfc, 0x67, 0x78, 0x67, 0x9c, 0x41, 0x61,
- 0x8d, 0x56, 0xd7, 0xea, 0xe6, 0xa4, 0x2d, 0xac, 0x41, 0x84, 0x89, 0xa3, 0x0f, 0xd6, 0x85, 0x10,
- 0x99, 0x7b, 0x66, 0x28, 0x92, 0x3e, 0xfa, 0x65, 0xfd, 0x8c, 0x1a, 0xca, 0x1d, 0x87, 0xce, 0x7a,
- 0xa7, 0x27, 0x82, 0x53, 0xac, 0x1f, 0xe0, 0x58, 0x0a, 0xfe, 0x54, 0x2a, 0x53, 0x69, 0x28, 0xb7,
- 0xb4, 0xdf, 0x78, 0x32, 0xd2, 0x70, 0xd6, 0xa6, 0x58, 0x3f, 0xc1, 0xfc, 0x85, 0x36, 0xd6, 0xe4,
- 0xcb, 0x69, 0x28, 0x6d, 0x27, 0x54, 0x24, 0x55, 0x9b, 0x22, 0x5e, 0xc1, 0x34, 0x30, 0x75, 0xde,
- 0x0d, 0x8b, 0x0e, 0xe9, 0xee, 0x5b, 0x41, 0xb5, 0x1c, 0x4e, 0xc7, 0x47, 0x98, 0xaf, 0x22, 0x85,
- 0x98, 0x2b, 0x2f, 0x9b, 0xf4, 0x27, 0x9a, 0x0c, 0x2f, 0xce, 0x47, 0xf8, 0x56, 0xe1, 0x33, 0x5c,
- 0x48, 0x17, 0x45, 0x3e, 0x40, 0xb0, 0xf8, 0xc7, 0xe3, 0x33, 0xde, 0xa6, 0xf2, 0x06, 0xf7, 0x3f,
- 0x01, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x93, 0xbe, 0x7e, 0x95, 0x01, 0x00, 0x00,
+ // 241 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0x4d, 0x4b, 0xc3, 0x40,
+ 0x10, 0x86, 0xd9, 0xd8, 0x8f, 0x38, 0x96, 0x5a, 0xc6, 0x0f, 0x96, 0x9e, 0x4a, 0x4e, 0x3d, 0x05,
+ 0x51, 0xbc, 0x79, 0xb4, 0xe0, 0x39, 0x05, 0x2f, 0x9e, 0x46, 0x33, 0x94, 0x85, 0xb8, 0x1b, 0x36,
+ 0xdb, 0x4a, 0xfe, 0x86, 0xbf, 0x58, 0x32, 0x4d, 0x74, 0xe9, 0xc9, 0xdb, 0xbc, 0xcf, 0x0e, 0xcf,
+ 0xce, 0x0c, 0xcc, 0xf9, 0xc0, 0x36, 0x18, 0xbb, 0xcb, 0x6b, 0xef, 0x82, 0xc3, 0x74, 0x47, 0x81,
+ 0xbf, 0xa8, 0x6d, 0xb2, 0x37, 0xb8, 0xd8, 0x74, 0x6f, 0x5b, 0xb7, 0xf7, 0x1f, 0x8c, 0x73, 0x48,
+ 0x4c, 0xa9, 0xd5, 0x4a, 0xad, 0xcf, 0x8b, 0xc4, 0x94, 0x88, 0x30, 0xb2, 0xf4, 0xc9, 0x3a, 0x11,
+ 0x22, 0x35, 0x5e, 0xc3, 0xf8, 0x40, 0xd5, 0x9e, 0xf5, 0xd9, 0x4a, 0xad, 0x67, 0xc5, 0x31, 0x74,
+ 0x9d, 0xa1, 0xad, 0x59, 0x8f, 0x8e, 0x9d, 0x5d, 0x9d, 0x3d, 0xc2, 0x58, 0xe4, 0xbf, 0x1a, 0x15,
+ 0x69, 0x34, 0x4c, 0x6b, 0x6a, 0x2b, 0x47, 0xa5, 0xd8, 0x67, 0xc5, 0x10, 0xb3, 0x67, 0x58, 0xbc,
+ 0x52, 0x65, 0xca, 0x78, 0x30, 0x0d, 0x53, 0xd3, 0x08, 0x15, 0x49, 0x5a, 0x0c, 0x11, 0x6f, 0x61,
+ 0xe2, 0x99, 0x1a, 0x67, 0xfb, 0x21, 0xfb, 0x74, 0xff, 0xad, 0x20, 0xdd, 0xf4, 0x6b, 0xe3, 0x13,
+ 0x2c, 0xb6, 0x81, 0x7c, 0x88, 0x95, 0x37, 0xf9, 0x70, 0x85, 0x3c, 0xc2, 0xcb, 0xcb, 0x13, 0x7c,
+ 0xa7, 0xf0, 0x05, 0xae, 0xe4, 0x2f, 0x0a, 0xfc, 0x0f, 0xc1, 0xf2, 0x0f, 0x9f, 0xae, 0xf1, 0x3e,
+ 0x91, 0xfb, 0x3f, 0xfc, 0x04, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc0, 0xda, 0xf1, 0x91, 0x01, 0x00,
+ 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -297,17 +296,6 @@ type EventingServer interface {
ValidateEventSource(context.Context, *EventSource) (*ValidEventSource, error)
}
-// UnimplementedEventingServer can be embedded to have forward compatible implementations.
-type UnimplementedEventingServer struct {
-}
-
-func (*UnimplementedEventingServer) StartEventSource(req *EventSource, srv Eventing_StartEventSourceServer) error {
- return status.Errorf(codes.Unimplemented, "method StartEventSource not implemented")
-}
-func (*UnimplementedEventingServer) ValidateEventSource(ctx context.Context, req *EventSource) (*ValidEventSource, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ValidateEventSource not implemented")
-}
-
func RegisterEventingServer(s *grpc.Server, srv EventingServer) {
s.RegisterService(&_Eventing_serviceDesc, srv)
}
diff --git a/gateways/eventing.proto b/gateways/eventing.proto
index d45e592d0e..aca646cf90 100644
--- a/gateways/eventing.proto
+++ b/gateways/eventing.proto
@@ -11,9 +11,9 @@ package gateways;
*/
service Eventing {
// StartEventSource starts an event source and returns stream of events.
- rpc StartEventSource(EventSource) returns (stream Event);
+ rpc StartEventSource (EventSource) returns (stream Event);
// ValidateEventSource validates an event source.
- rpc ValidateEventSource(EventSource) returns (ValidEventSource);
+ rpc ValidateEventSource (EventSource) returns (ValidEventSource);
}
/**
@@ -25,9 +25,9 @@ message EventSource {
// The event source name.
string name = 2;
// The event source configuration value.
- string data = 3;
- // Version of the event source
- string version = 4;
+ bytes value = 3;
+ // Type of the event source
+ string type = 4;
}
/**
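
Every gateway server now consumes these two fields the same way: match Type against the expected event source type, then unmarshal the raw Value bytes into the typed configuration. A rough sketch of that flow (eventSourceSpec and the "amqp" literal here are illustrative; real listeners use types such as v1alpha1.AMQPEventSource):

    package main

    import (
        "fmt"

        "github.com/ghodss/yaml"
    )

    // eventSourceSpec stands in for a typed event source configuration.
    type eventSourceSpec struct {
        URL string `json:"url"`
    }

    // handle mimics how a listener consumes the new EventSource fields:
    // Type selects the listener, Value carries the YAML configuration.
    func handle(sourceType string, value []byte) error {
        if sourceType != "amqp" {
            return fmt.Errorf("event source type mismatch: got %s", sourceType)
        }
        var spec *eventSourceSpec
        if err := yaml.Unmarshal(value, &spec); err != nil {
            return err
        }
        fmt.Println("url:", spec.URL)
        return nil
    }

    func main() {
        if err := handle("amqp", []byte("url: amqp://rabbitmq:5672")); err != nil {
            fmt.Println(err)
        }
    }
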
diff --git a/gateways/core/stream/amqp/Dockerfile b/gateways/server/amqp/Dockerfile
similarity index 100%
rename from gateways/core/stream/amqp/Dockerfile
rename to gateways/server/amqp/Dockerfile
diff --git a/gateways/core/file/cmd/main.go b/gateways/server/amqp/cmd/main.go
similarity index 77%
rename from gateways/core/file/cmd/main.go
rename to gateways/server/amqp/cmd/main.go
index 223ccfdb24..55b914bbaa 100644
--- a/gateways/core/file/cmd/main.go
+++ b/gateways/server/amqp/cmd/main.go
@@ -18,12 +18,12 @@ package main
import (
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/core/file"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/amqp"
)
func main() {
- gateways.StartGateway(&file.FileEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
+ server.StartGateway(&amqp.EventListener{
+ Logger: common.NewArgoEventsLogger(),
})
}
diff --git a/gateways/server/amqp/start.go b/gateways/server/amqp/start.go
new file mode 100644
index 0000000000..2e41349e60
--- /dev/null
+++ b/gateways/server/amqp/start.go
@@ -0,0 +1,135 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package amqp
+
+import (
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ amqplib "github.com/streadway/amqp"
+ "k8s.io/apimachinery/pkg/util/wait"
+)
+
+// EventListener implements Eventing for amqp event source
+type EventListener struct {
+	// Logger is the logger used by the event listener
+ Logger *logrus.Logger
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("started processing the event source...")
+
+ dataCh := make(chan []byte)
+ errorCh := make(chan error)
+ doneCh := make(chan struct{}, 1)
+
+ go listener.listenEvents(eventSource, dataCh, errorCh, doneCh)
+
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
+}
+
+// listenEvents listens to events from amqp server
+func (listener *EventListener) listenEvents(eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+ defer server.Recover(eventSource.Name)
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ logger.Infoln("parsing the event source...")
+ var amqpEventSource *v1alpha1.AMQPEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &amqpEventSource); err != nil {
+ errorCh <- err
+ return
+ }
+
+ var conn *amqplib.Connection
+
+ logger.Infoln("dialing connection...")
+ if err := server.Connect(&wait.Backoff{
+ Steps: amqpEventSource.ConnectionBackoff.Steps,
+ Factor: amqpEventSource.ConnectionBackoff.Factor,
+ Duration: amqpEventSource.ConnectionBackoff.Duration,
+ Jitter: amqpEventSource.ConnectionBackoff.Jitter,
+ }, func() error {
+ var err error
+ conn, err = amqplib.Dial(amqpEventSource.URL)
+ if err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger.Infoln("opening the server channel...")
+ ch, err := conn.Channel()
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger.Infoln("setting up the delivery channel...")
+ delivery, err := getDelivery(ch, amqpEventSource)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger.Info("listening to messages on channel...")
+ for {
+ select {
+ case msg := <-delivery:
+ logger.Infoln("dispatching event on data channel...")
+ dataCh <- msg.Body
+ case <-doneCh:
+ err = conn.Close()
+ if err != nil {
+ logger.WithError(err).Info("failed to close connection")
+ }
+ return
+ }
+ }
+}
+
+// getDelivery sets up a channel for message deliveries
+func getDelivery(ch *amqplib.Channel, eventSource *v1alpha1.AMQPEventSource) (<-chan amqplib.Delivery, error) {
+ err := ch.ExchangeDeclare(eventSource.ExchangeName, eventSource.ExchangeType, true, false, false, false, nil)
+ if err != nil {
+ return nil, errors.Errorf("failed to declare exchange with name %s and type %s. err: %+v", eventSource.ExchangeName, eventSource.ExchangeType, err)
+ }
+
+ q, err := ch.QueueDeclare("", false, false, true, false, nil)
+ if err != nil {
+ return nil, errors.Errorf("failed to declare queue: %s", err)
+ }
+
+ err = ch.QueueBind(q.Name, eventSource.RoutingKey, eventSource.ExchangeName, false, nil)
+ if err != nil {
+ return nil, errors.Errorf("failed to bind %s exchange '%s' to queue with routingKey: %s: %s", eventSource.ExchangeType, eventSource.ExchangeName, eventSource.RoutingKey, err)
+ }
+
+ delivery, err := ch.Consume(q.Name, "", true, false, false, false, nil)
+ if err != nil {
+ return nil, errors.Errorf("failed to begin consuming messages: %s", err)
+ }
+ return delivery, nil
+}
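
The positional booleans in getDelivery above are easy to misread. Re-stated with the streadway/amqp parameter names spelled out in comments (a commentary sketch, not additional package code):

    package sketch

    import amqplib "github.com/streadway/amqp"

    // annotatedDelivery repeats getDelivery's calls with the boolean parameters named.
    func annotatedDelivery(ch *amqplib.Channel, exchangeName, exchangeType, routingKey string) (<-chan amqplib.Delivery, error) {
        // ExchangeDeclare(name, kind, durable=true, autoDelete=false, internal=false, noWait=false, args=nil)
        if err := ch.ExchangeDeclare(exchangeName, exchangeType, true, false, false, false, nil); err != nil {
            return nil, err
        }
        // QueueDeclare(name="" for a server-generated name, durable=false, autoDelete=false, exclusive=true, noWait=false, args=nil)
        q, err := ch.QueueDeclare("", false, false, true, false, nil)
        if err != nil {
            return nil, err
        }
        // QueueBind(queue, routingKey, exchange, noWait=false, args=nil)
        if err := ch.QueueBind(q.Name, routingKey, exchangeName, false, nil); err != nil {
            return nil, err
        }
        // Consume(queue, consumer="", autoAck=true, exclusive=false, noLocal=false, noWait=false, args=nil)
        return ch.Consume(q.Name, "", true, false, false, false, nil)
    }
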
diff --git a/gateways/server/amqp/validate.go b/gateways/server/amqp/validate.go
new file mode 100644
index 0000000000..fd251f1b40
--- /dev/null
+++ b/gateways/server/amqp/validate.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package amqp
+
+import (
+ "context"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+)
+
+// ValidateEventSource validates the amqp event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.AMQPEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.AMQPEvent)),
+ }, nil
+ }
+
+ var amqpEventSource *v1alpha1.AMQPEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &amqpEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(amqpEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to validate amqp event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.AMQPEventSource) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if eventSource.URL == "" {
+ return errors.New("url must be specified")
+ }
+ if eventSource.RoutingKey == "" {
+ return errors.New("routing key must be specified")
+ }
+ if eventSource.ExchangeName == "" {
+ return errors.New("exchange name must be specified")
+ }
+ if eventSource.ExchangeType == "" {
+ return errors.New("exchange type must be specified")
+ }
+ return nil
+}
diff --git a/gateways/server/amqp/validate_test.go b/gateways/server/amqp/validate_test.go
new file mode 100644
index 0000000000..1c1209b291
--- /dev/null
+++ b/gateways/server/amqp/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package amqp
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateAMQPEventSource(t *testing.T) {
+ listener := &EventListener{}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "amqp",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("amqp"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "amqp.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.AMQP {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "amqp",
+ Value: content,
+ Type: "amqp",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/community/aws-sns/Dockerfile b/gateways/server/aws-sns/Dockerfile
similarity index 100%
rename from gateways/community/aws-sns/Dockerfile
rename to gateways/server/aws-sns/Dockerfile
diff --git a/gateways/community/aws-sns/cmd/main.go b/gateways/server/aws-sns/cmd/main.go
similarity index 70%
rename from gateways/community/aws-sns/cmd/main.go
rename to gateways/server/aws-sns/cmd/main.go
index 01bbbf1e1b..88a5ce54b2 100644
--- a/gateways/community/aws-sns/cmd/main.go
+++ b/gateways/server/aws-sns/cmd/main.go
@@ -20,8 +20,8 @@ import (
"os"
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/community/aws-sns"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/aws-sns"
"k8s.io/client-go/kubernetes"
)
@@ -32,13 +32,9 @@ func main() {
panic(err)
}
clientset := kubernetes.NewForConfigOrDie(restConfig)
- namespace, ok := os.LookupEnv(common.EnvVarGatewayNamespace)
- if !ok {
- panic("namespace is not provided")
- }
- gateways.StartGateway(&aws_sns.SNSEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- Clientset: clientset,
- Namespace: namespace,
+
+ server.StartGateway(&aws_sns.EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ K8sClient: clientset,
})
}
diff --git a/gateways/server/aws-sns/start.go b/gateways/server/aws-sns/start.go
new file mode 100644
index 0000000000..6484435660
--- /dev/null
+++ b/gateways/server/aws-sns/start.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package aws_sns
+
+import (
+ "io/ioutil"
+ "net/http"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ commonaws "github.com/argoproj/argo-events/gateways/server/common/aws"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ snslib "github.com/aws/aws-sdk-go/service/sns"
+ "github.com/ghodss/yaml"
+)
+
+var (
+ // controller controls the webhook operations
+ controller = webhook.NewController()
+)
+
+// set up route activation and deactivation channels
+func init() {
+ go webhook.ProcessRouteStatus(controller)
+}
+
+// Implement Router
+// 1. GetRoute
+// 2. HandleRoute
+// 3. PostActivate
+// 4. PostInactivate
+
+// GetRoute returns the route
+func (router *Router) GetRoute() *webhook.Route {
+ return router.Route
+}
+
+// HandleRoute handles new routes
+func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) {
+ route := router.Route
+
+ logger := route.Logger.WithFields(
+ map[string]interface{}{
+ common.LabelEventSource: route.EventSource.Name,
+ common.LabelEndpoint: route.Context.Endpoint,
+ common.LabelPort: route.Context.Port,
+ common.LabelHTTPMethod: route.Context.Method,
+ })
+
+ logger.Info("request received from event source")
+
+ if !route.Active {
+ logger.Info("endpoint is not active, won't process the request")
+ common.SendErrorResponse(writer, "inactive endpoint")
+ return
+ }
+
+ body, err := ioutil.ReadAll(request.Body)
+ if err != nil {
+ logger.WithError(err).Error("failed to parse the request body")
+ common.SendErrorResponse(writer, err.Error())
+ return
+ }
+
+ logger.WithField("body", string(body)).Debugln("request body")
+
+ var notification *httpNotification
+ err = yaml.Unmarshal(body, ¬ification)
+ if err != nil {
+ logger.WithError(err).Error("failed to convert request payload into sns notification")
+ common.SendErrorResponse(writer, err.Error())
+ return
+ }
+
+ switch notification.Type {
+ case messageTypeSubscriptionConfirmation:
+ awsSession := router.session
+ response, err := awsSession.ConfirmSubscription(&snslib.ConfirmSubscriptionInput{
+ TopicArn: &router.eventSource.TopicArn,
+ Token: ¬ification.Token,
+ })
+ if err != nil {
+ logger.WithError(err).Error("failed to send confirmation response to aws sns")
+ common.SendErrorResponse(writer, err.Error())
+ return
+ }
+ logger.Infoln("subscription successfully confirmed to aws sns")
+ router.subscriptionArn = response.SubscriptionArn
+
+ case messageTypeNotification:
+ logger.Infoln("dispatching notification on route's data channel")
+ route.DataCh <- body
+ }
+
+ logger.Info("request has been successfully processed")
+}
+
+// PostActivate refers to operations performed after a route is successfully activated
+func (router *Router) PostActivate() error {
+ route := router.Route
+
+ logger := route.Logger.WithFields(
+ map[string]interface{}{
+ common.LabelEventSource: route.EventSource.Name,
+ common.LabelEndpoint: route.Context.Endpoint,
+ common.LabelPort: route.Context.Port,
+ common.LabelHTTPMethod: route.Context.Method,
+ "topic-arn": router.eventSource.TopicArn,
+ })
+
+	// In order to successfully subscribe to the SNS topic:
+	// 1. Fetch credentials if they are configured explicitly. Users can rely on something like https://github.com/jtblin/kube2iam
+	//    to avoid configuring credentials explicitly.
+	// 2. Create an AWS session.
+	// 3. Subscribe to the topic.
+
+ logger.Info("subscribing to sns topic...")
+
+ snsEventSource := router.eventSource
+
+ awsSession, err := commonaws.CreateAWSSession(router.k8sClient, snsEventSource.Namespace, snsEventSource.Region, snsEventSource.AccessKey, snsEventSource.SecretKey)
+ if err != nil {
+ return err
+ }
+
+ router.session = snslib.New(awsSession)
+ formattedUrl := common.FormattedURL(snsEventSource.Webhook.URL, snsEventSource.Webhook.Endpoint)
+ if _, err := router.session.Subscribe(&snslib.SubscribeInput{
+ Endpoint: &formattedUrl,
+ Protocol: &snsProtocol,
+ TopicArn: &snsEventSource.TopicArn,
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// PostInactivate refers to operations performed after a route is successfully deactivated
+func (router *Router) PostInactivate() error {
+	// After the event source is removed, the subscription is cancelled.
+ if _, err := router.session.Unsubscribe(&snslib.UnsubscribeInput{
+ SubscriptionArn: router.subscriptionArn,
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+// StartEventSource starts an SNS event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ defer server.Recover(eventSource.Name)
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+ logger.Info("started processing the event source...")
+
+ var snsEventSource *v1alpha1.SNSEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &snsEventSource); err != nil {
+ logger.WithError(err).Error("failed to parse event source")
+ return err
+ }
+
+ route := webhook.NewRoute(snsEventSource.Webhook, listener.Logger, eventSource)
+
+ logger.Infoln("operating on the route...")
+ return webhook.ManageRoute(&Router{
+ Route: route,
+ eventSource: snsEventSource,
+ k8sClient: listener.K8sClient,
+ }, controller, eventStream)
+}
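
Two kinds of SNS posts reach HandleRoute: a one-time SubscriptionConfirmation, answered with ConfirmSubscription using the token from the payload, and ordinary Notification messages, which are forwarded on the route's data channel. A minimal sketch of that dispatch (snsMessage only mirrors the httpNotification fields the handler relies on):

    package sketch

    import "github.com/ghodss/yaml"

    // snsMessage mirrors the httpNotification fields used by HandleRoute.
    type snsMessage struct {
        Type  string `json:"Type"`
        Token string `json:"Token"`
    }

    // dispatch mimics the switch in HandleRoute.
    func dispatch(body []byte, confirm func(token string) error, data chan<- []byte) error {
        var msg *snsMessage
        if err := yaml.Unmarshal(body, &msg); err != nil {
            return err
        }
        switch msg.Type {
        case "SubscriptionConfirmation":
            // sent once, right after PostActivate subscribes the webhook URL to the topic
            return confirm(msg.Token)
        case "Notification":
            // every subsequent message is forwarded to the gateway as-is
            data <- body
        }
        return nil
    }
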
diff --git a/gateways/community/aws-sns/config.go b/gateways/server/aws-sns/types.go
similarity index 61%
rename from gateways/community/aws-sns/config.go
rename to gateways/server/aws-sns/types.go
index b6d74d4500..a44d338b4a 100644
--- a/gateways/community/aws-sns/config.go
+++ b/gateways/server/aws-sns/types.go
@@ -17,18 +17,15 @@ limitations under the License.
package aws_sns
import (
- "github.com/sirupsen/logrus"
"time"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
snslib "github.com/aws/aws-sdk-go/service/sns"
- "github.com/ghodss/yaml"
- corev1 "k8s.io/api/core/v1"
+ "github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
)
-const ArgoEventsEventSourceVersion = "v0.11"
-
const (
messageTypeSubscriptionConfirmation = "SubscriptionConfirmation"
messageTypeNotification = "Notification"
@@ -38,23 +35,26 @@ var (
snsProtocol = "http"
)
-// SNSEventSourceExecutor implements Eventing
-type SNSEventSourceExecutor struct {
- Log *logrus.Logger
- // Clientset is kubernetes client
- Clientset kubernetes.Interface
- // Namespace where gateway is deployed
- Namespace string
+// EventListener implements Eventing for aws sns event source
+type EventListener struct {
+ // Logger to log stuff
+ Logger *logrus.Logger
+ // K8sClient is kubernetes client
+ K8sClient kubernetes.Interface
}
-// RouteConfig contains information for a route
-type RouteConfig struct {
- Route *gwcommon.Route
- snses *snsEventSource
- session *snslib.SNS
+// Router contains information for a route
+type Router struct {
+ // Route contains webhook context and configuration related to api route
+ Route *webhook.Route
+ // eventSource refers to sns event source configuration
+ eventSource *v1alpha1.SNSEventSource
+ // session refers to aws session
+ session *snslib.SNS
+ // subscriptionArn is the ARN of the SNS topic subscription
subscriptionArn *string
- clientset kubernetes.Interface
- namespace string
+ // k8sClient is Kubernetes client
+ k8sClient kubernetes.Interface
}
// Json http notifications
@@ -76,22 +76,3 @@ type httpNotification struct {
SigningCertURL string `json:"SigningCertURL"`
UnsubscribeURL string `json:"UnsubscribeURL,omitempty"` // Only for notifications
}
-
-// snsEventSource contains configuration to subscribe to SNS topic
-type snsEventSource struct {
- // Hook defines a webhook.
- Hook *gwcommon.Webhook `json:"hook"`
- TopicArn string `json:"topicArn"`
- AccessKey *corev1.SecretKeySelector `json:"accessKey" protobuf:"bytes,5,opt,name=accessKey"`
- SecretKey *corev1.SecretKeySelector `json:"secretKey" protobuf:"bytes,6,opt,name=secretKey"`
- Region string `json:"region"`
-}
-
-func parseEventSource(es string) (interface{}, error) {
- var ses *snsEventSource
- err := yaml.Unmarshal([]byte(es), &ses)
- if err != nil {
- return nil, err
- }
- return ses, nil
-}
diff --git a/gateways/server/aws-sns/validate.go b/gateways/server/aws-sns/validate.go
new file mode 100644
index 0000000000..0d0d00bca3
--- /dev/null
+++ b/gateways/server/aws-sns/validate.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package aws_sns
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates sns event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.SNSEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.SNSEvent)),
+ }, nil
+ }
+
+ var snsEventSource *v1alpha1.SNSEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &snsEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(snsEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ Reason: err.Error(),
+ IsValid: false,
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(snsEventSource *v1alpha1.SNSEventSource) error {
+ if snsEventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if snsEventSource.TopicArn == "" {
+ return fmt.Errorf("must specify topic arn")
+ }
+ if snsEventSource.Region == "" {
+ return fmt.Errorf("must specify region")
+ }
+ return webhook.ValidateWebhookContext(snsEventSource.Webhook)
+}
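For reference, the smallest configuration that passes this validator needs a topic ARN, a region and a syntactically valid webhook context. A sketch with placeholder values, assuming the Webhook field is the webhook.Context type that the call to webhook.ValidateWebhookContext implies:

package example

import (
	"github.com/argoproj/argo-events/gateways/server/common/webhook"
	"github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
)

// minimalSNSEventSource builds an SNSEventSource that satisfies validate():
// TopicArn and Region are set and the webhook context has an endpoint and a numeric port.
func minimalSNSEventSource() *v1alpha1.SNSEventSource {
	return &v1alpha1.SNSEventSource{
		TopicArn: "arn:aws:sns:us-east-1:123456789012:example-topic", // placeholder ARN
		Region:   "us-east-1",
		Webhook: &webhook.Context{
			Endpoint: "/sns",
			Method:   "POST",
			Port:     "12000",
			URL:      "http://gateway.example.com",
		},
	}
}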
diff --git a/gateways/server/aws-sns/validate_test.go b/gateways/server/aws-sns/validate_test.go
new file mode 100644
index 0000000000..84b346a733
--- /dev/null
+++ b/gateways/server/aws-sns/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package aws_sns
+
+import (
+ "context"
+ "fmt"
+ "github.com/argoproj/argo-events/common"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/gateways"
+ esv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSNSEventSourceExecutor_ValidateEventSource(t *testing.T) {
+ listener := &EventListener{}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "sns",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("sns"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "aws-sns.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *esv1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.SNS {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "sns",
+ Value: content,
+ Type: "sns",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/community/aws-sqs/Dockerfile b/gateways/server/aws-sqs/Dockerfile
similarity index 100%
rename from gateways/community/aws-sqs/Dockerfile
rename to gateways/server/aws-sqs/Dockerfile
diff --git a/gateways/core/artifact/cmd/main.go b/gateways/server/aws-sqs/cmd/main.go
similarity index 76%
rename from gateways/core/artifact/cmd/main.go
rename to gateways/server/aws-sqs/cmd/main.go
index 2e1688301f..52a498d568 100644
--- a/gateways/core/artifact/cmd/main.go
+++ b/gateways/server/aws-sqs/cmd/main.go
@@ -20,8 +20,8 @@ import (
"os"
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/core/artifact"
+ "github.com/argoproj/argo-events/gateways/server"
+ aws_sqs "github.com/argoproj/argo-events/gateways/server/aws-sqs"
"k8s.io/client-go/kubernetes"
)
@@ -32,13 +32,15 @@ func main() {
panic(err)
}
clientset := kubernetes.NewForConfigOrDie(restConfig)
- namespace, ok := os.LookupEnv(common.EnvVarGatewayNamespace)
+
+ namespace, ok := os.LookupEnv(common.EnvVarNamespace)
if !ok {
panic("namespace is not provided")
}
- gateways.StartGateway(&artifact.S3EventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- Clientset: clientset,
+
+ server.StartGateway(&aws_sqs.EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ K8sClient: clientset,
Namespace: namespace,
})
}
diff --git a/gateways/server/aws-sqs/start.go b/gateways/server/aws-sqs/start.go
new file mode 100644
index 0000000000..972bb737fa
--- /dev/null
+++ b/gateways/server/aws-sqs/start.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package aws_sqs
+
+import (
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ commonaws "github.com/argoproj/argo-events/gateways/server/common/aws"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ sqslib "github.com/aws/aws-sdk-go/service/sqs"
+ "github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
+ "k8s.io/client-go/kubernetes"
+)
+
+// EventListener implements Eventing for aws sqs event source
+type EventListener struct {
+ Logger *logrus.Logger
+ // K8sClient is the kubernetes client
+ K8sClient kubernetes.Interface
+ // Namespace where gateway is deployed
+ Namespace string
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ defer server.Recover(eventSource.Name)
+
+ log := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+ log.Info("started processing the event source...")
+
+ dataCh := make(chan []byte)
+ errorCh := make(chan error)
+ doneCh := make(chan struct{}, 1)
+
+ go listener.listenEvents(eventSource, dataCh, errorCh, doneCh)
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
+}
+
+// listenEvents polls the SQS queue and fires an event for every message that is received.
+func (listener *EventListener) listenEvents(eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+ var sqsEventSource *v1alpha1.SQSEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &sqsEventSource); err != nil {
+ errorCh <- err
+ return
+ }
+
+ var awsSession *session.Session
+
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("setting up aws session...")
+ awsSession, err := commonaws.CreateAWSSession(listener.K8sClient, sqsEventSource.Namespace, sqsEventSource.Region, sqsEventSource.AccessKey, sqsEventSource.SecretKey)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ sqsClient := sqslib.New(awsSession)
+
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("fetching queue url...")
+ queueURL, err := sqsClient.GetQueueUrl(&sqslib.GetQueueUrlInput{
+ QueueName: &sqsEventSource.Queue,
+ })
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("listening for messages on the queue...")
+ for {
+ select {
+ case <-doneCh:
+ return
+
+ default:
+ msg, err := sqsClient.ReceiveMessage(&sqslib.ReceiveMessageInput{
+ QueueUrl: queueURL.QueueUrl,
+ MaxNumberOfMessages: aws.Int64(1),
+ WaitTimeSeconds: aws.Int64(sqsEventSource.WaitTimeSeconds),
+ })
+ if err != nil {
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).WithError(err).Error("failed to process item from queue, waiting for next timeout")
+ continue
+ }
+
+ if msg != nil && len(msg.Messages) > 0 {
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("dispatching message from queue on data channel")
+ listener.Logger.WithFields(map[string]interface{}{
+ common.LabelEventSource: eventSource.Name,
+ "message": *msg.Messages[0].Body,
+ }).Debugln("message from queue")
+
+ dataCh <- []byte(*msg.Messages[0].Body)
+
+ if _, err := sqsClient.DeleteMessage(&sqslib.DeleteMessageInput{
+ QueueUrl: queueURL.QueueUrl,
+ ReceiptHandle: msg.Messages[0].ReceiptHandle,
+ }); err != nil {
+ errorCh <- err
+ return
+ }
+ }
+ }
+ }
+}
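The listener above follows the standard SQS receive/delete long-polling pattern. A trimmed-down sketch of just that cycle, with a placeholder region and queue name and credentials taken from the default provider chain:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	sqslib "github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := sqslib.New(sess)

	// resolve the queue URL once, then poll
	queueURL, err := client.GetQueueUrl(&sqslib.GetQueueUrlInput{QueueName: aws.String("example-queue")})
	if err != nil {
		log.Fatal(err)
	}

	for {
		// WaitTimeSeconds > 0 enables long polling: the call blocks until a message
		// arrives or the wait time elapses, instead of returning immediately
		out, err := client.ReceiveMessage(&sqslib.ReceiveMessageInput{
			QueueUrl:            queueURL.QueueUrl,
			MaxNumberOfMessages: aws.Int64(1),
			WaitTimeSeconds:     aws.Int64(20),
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, msg := range out.Messages {
			fmt.Println("received:", aws.StringValue(msg.Body))
			// delete the message once it has been handed off, otherwise it
			// becomes visible again after the visibility timeout
			if _, err := client.DeleteMessage(&sqslib.DeleteMessageInput{
				QueueUrl:      queueURL.QueueUrl,
				ReceiptHandle: msg.ReceiptHandle,
			}); err != nil {
				log.Fatal(err)
			}
		}
	}
}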
diff --git a/gateways/server/aws-sqs/validate.go b/gateways/server/aws-sqs/validate.go
new file mode 100644
index 0000000000..bf699f6d53
--- /dev/null
+++ b/gateways/server/aws-sqs/validate.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package aws_sqs
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates sqs event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.SQSEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.SQSEvent)),
+ }, nil
+ }
+
+ var sqsEventSource *v1alpha1.SQSEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &sqsEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(sqsEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.SQSEventSource) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if eventSource.WaitTimeSeconds == 0 {
+ return fmt.Errorf("must specify polling timeout")
+ }
+ if eventSource.Region == "" {
+ return fmt.Errorf("must specify region")
+ }
+ if eventSource.Queue == "" {
+ return fmt.Errorf("must specify queue name")
+ }
+ return nil
+}
diff --git a/gateways/server/aws-sqs/validate_test.go b/gateways/server/aws-sqs/validate_test.go
new file mode 100644
index 0000000000..0cbcff1c23
--- /dev/null
+++ b/gateways/server/aws-sqs/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package aws_sqs
+
+import (
+ "context"
+ "fmt"
+ "github.com/argoproj/argo-events/common"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateEventSource(t *testing.T) {
+ listener := &EventListener{}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "sqs",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("sqs"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "aws-sqs.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.SQS {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "sqs",
+ Value: content,
+ Type: "sqs",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/core/calendar/Dockerfile b/gateways/server/calendar/Dockerfile
similarity index 100%
rename from gateways/core/calendar/Dockerfile
rename to gateways/server/calendar/Dockerfile
diff --git a/gateways/server/calendar/cmd/main.go b/gateways/server/calendar/cmd/main.go
new file mode 100644
index 0000000000..dd6c673bab
--- /dev/null
+++ b/gateways/server/calendar/cmd/main.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/calendar"
+)
+
+func main() {
+ server.StartGateway(&calendar.EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ })
+}
diff --git a/gateways/server/calendar/start.go b/gateways/server/calendar/start.go
new file mode 100644
index 0000000000..9bf2a1ff31
--- /dev/null
+++ b/gateways/server/calendar/start.go
@@ -0,0 +1,164 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package calendar
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+ cronlib "github.com/robfig/cron"
+ "github.com/sirupsen/logrus"
+)
+
+// EventListener implements Eventing for calendar based events
+type EventListener struct {
+ // Logger to log stuff
+ Logger *logrus.Logger
+}
+
+// response is the event payload that is sent as a response to the sensor
+type response struct {
+ // EventTime is the time at which the event occurred
+ EventTime time.Time `json:"eventTime"`
+ // UserPayload is the optional payload supplied by the user
+ UserPayload *json.RawMessage `json:"userPayload"`
+}
+
+// Next is a function to compute the next event time from a given time
+type Next func(time.Time) time.Time
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("started processing the event source...")
+
+ dataCh := make(chan []byte)
+ errorCh := make(chan error)
+ doneCh := make(chan struct{}, 1)
+
+ go listener.listenEvents(eventSource, dataCh, errorCh, doneCh)
+
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
+}
+
+// listenEvents fires an event each time the schedule's next occurrence is reached.
+func (listener *EventListener) listenEvents(eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+ defer server.Recover(eventSource.Name)
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ logger.Infoln("parsing calendar event source...")
+ var calendarEventSource *v1alpha1.CalendarEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &calendarEventSource); err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger.Infoln("resolving calendar schedule...")
+ schedule, err := resolveSchedule(calendarEventSource)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger.Infoln("parsing exclusion dates if any...")
+ exDates, err := common.ParseExclusionDates(calendarEventSource.ExclusionDates)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ var next Next
+ next = func(last time.Time) time.Time {
+ nextT := schedule.Next(last)
+ nextYear := nextT.Year()
+ nextMonth := nextT.Month()
+ nextDay := nextT.Day()
+ for _, exDate := range exDates {
+ // if exDate == nextEvent, then we need to skip this and get the next
+ if exDate.Year() == nextYear && exDate.Month() == nextMonth && exDate.Day() == nextDay {
+ return next(nextT)
+ }
+ }
+ return nextT
+ }
+
+ lastT := time.Now()
+ var location *time.Location
+ if calendarEventSource.Timezone != "" {
+ logger.WithField("location", calendarEventSource.Timezone).Infoln("loading location for the schedule...")
+ location, err = time.LoadLocation(calendarEventSource.Timezone)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ lastT = lastT.In(location)
+ }
+
+ for {
+ t := next(lastT)
+ timer := time.After(time.Until(t))
+ logger.WithField(common.LabelTime, t.UTC().String()).Info("expected next calendar event")
+ select {
+ case tx := <-timer:
+ lastT = tx
+ if location != nil {
+ lastT = lastT.In(location)
+ }
+ response := &response{
+ EventTime: tx,
+ UserPayload: calendarEventSource.UserPayload,
+ }
+ payload, err := json.Marshal(response)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ logger.Infoln("event dispatched on data channel")
+ dataCh <- payload
+ case <-doneCh:
+ return
+ }
+ }
+}
+
+// resolveSchedule parses the schedule and returns a valid cron schedule
+func resolveSchedule(cal *v1alpha1.CalendarEventSource) (cronlib.Schedule, error) {
+ if cal.Schedule != "" {
+ // standard cron expression
+ specParser := cronlib.NewParser(cronlib.Minute | cronlib.Hour | cronlib.Dom | cronlib.Month | cronlib.Dow)
+ schedule, err := specParser.Parse(cal.Schedule)
+ if err != nil {
+ return nil, errors.Errorf("failed to parse schedule %s from calendar event. Cause: %+v", cal.Schedule, err.Error())
+ }
+ return schedule, nil
+ } else if cal.Interval != "" {
+ intervalDuration, err := time.ParseDuration(cal.Interval)
+ if err != nil {
+ return nil, errors.Errorf("failed to parse interval %s from calendar event. Cause: %+v", cal.Interval, err.Error())
+ }
+ schedule := cronlib.ConstantDelaySchedule{Delay: intervalDuration}
+ return schedule, nil
+ } else {
+ return nil, errors.New("calendar event must contain either a schedule or interval")
+ }
+}
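To make the two branches of resolveSchedule concrete, the sketch below computes the next firing time for both a cron expression and an interval using the same robfig/cron primitives; the expressions are arbitrary examples:

package main

import (
	"fmt"
	"time"

	cronlib "github.com/robfig/cron"
)

func main() {
	now := time.Now()

	// cron branch: a five-field spec parsed with an explicit parser,
	// mirroring resolveSchedule's handling of CalendarEventSource.Schedule
	parser := cronlib.NewParser(cronlib.Minute | cronlib.Hour | cronlib.Dom | cronlib.Month | cronlib.Dow)
	schedule, err := parser.Parse("30 * * * *") // minute 30 of every hour
	if err != nil {
		panic(err)
	}
	fmt.Println("next cron occurrence:", schedule.Next(now))

	// interval branch: a fixed delay, mirroring CalendarEventSource.Interval
	interval, err := time.ParseDuration("10m")
	if err != nil {
		panic(err)
	}
	constant := cronlib.ConstantDelaySchedule{Delay: interval}
	fmt.Println("next interval occurrence:", constant.Next(now))
}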
diff --git a/gateways/core/calendar/start_test.go b/gateways/server/calendar/start_test.go
similarity index 66%
rename from gateways/core/calendar/start_test.go
rename to gateways/server/calendar/start_test.go
index e2ffb3982f..29d22879cc 100644
--- a/gateways/core/calendar/start_test.go
+++ b/gateways/server/calendar/start_test.go
@@ -17,20 +17,22 @@ limitations under the License.
package calendar
import (
+ "encoding/json"
+ "testing"
+
"github.com/argoproj/argo-events/common"
"github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
"github.com/ghodss/yaml"
"github.com/smartystreets/goconvey/convey"
- "testing"
)
func TestResolveSchedule(t *testing.T) {
convey.Convey("Given a calendar schedule, resolve it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
-
- schedule, err := resolveSchedule(ps.(*calSchedule))
+ schedule, err := resolveSchedule(&v1alpha1.CalendarEventSource{
+ Schedule: "* * * * *",
+ })
convey.So(err, convey.ShouldBeNil)
convey.So(schedule, convey.ShouldNotBeNil)
})
@@ -38,13 +40,10 @@ func TestResolveSchedule(t *testing.T) {
func TestListenEvents(t *testing.T) {
convey.Convey("Given a calendar schedule, listen events", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
-
- ese := &CalendarEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
+ listener := &EventListener{
+ Logger: common.NewArgoEventsLogger(),
}
+
dataCh := make(chan []byte)
errorCh := make(chan error)
doneCh := make(chan struct{}, 1)
@@ -55,20 +54,32 @@ func TestListenEvents(t *testing.T) {
dataCh2 <- data
}()
- go ese.listenEvents(ps.(*calSchedule), &gateways.EventSource{
- Name: "fake",
- Data: es,
- Id: "1234",
+ payload := []byte(`"{\r\n\"hello\": \"world\"\r\n}"`)
+ raw := json.RawMessage(payload)
+
+ calendarEventSource := &v1alpha1.CalendarEventSource{
+ Schedule: "* * * * *",
+ UserPayload: &raw,
+ }
+
+ body, err := yaml.Marshal(calendarEventSource)
+ convey.So(err, convey.ShouldBeNil)
+
+ go listener.listenEvents(&gateways.EventSource{
+ Name: "fake",
+ Value: body,
+ Id: "1234",
+ Type: string(apicommon.CalendarEvent),
}, dataCh, errorCh, doneCh)
data := <-dataCh2
doneCh <- struct{}{}
- var cal *calResponse
+ var cal *response
err = yaml.Unmarshal(data, &cal)
convey.So(err, convey.ShouldBeNil)
- payload, err := cal.UserPayload.MarshalJSON()
+ payload, err = cal.UserPayload.MarshalJSON()
convey.So(err, convey.ShouldBeNil)
convey.So(string(payload), convey.ShouldEqual, `"{\r\n\"hello\": \"world\"\r\n}"`)
diff --git a/gateways/server/calendar/validate.go b/gateways/server/calendar/validate.go
new file mode 100644
index 0000000000..602094c885
--- /dev/null
+++ b/gateways/server/calendar/validate.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package calendar
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+
+// ValidateEventSource validates calendar event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.CalendarEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.CalendarEvent)),
+ }, nil
+ }
+
+ var calendarEventSource *v1alpha1.CalendarEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &calendarEventSource); err != nil {
+ listener.Logger.WithError(err).WithField(common.LabelEventSource, eventSource.Name).Errorln("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(calendarEventSource); err != nil {
+ listener.Logger.WithError(err).WithField(common.LabelEventSource, eventSource.Name).Errorln("failed to validate the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(calendarEventSource *v1alpha1.CalendarEventSource) error {
+ if calendarEventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if calendarEventSource.Schedule == "" && calendarEventSource.Interval == "" {
+ return fmt.Errorf("must have either schedule or interval")
+ }
+ if _, err := resolveSchedule(calendarEventSource); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/gateways/server/calendar/validate_test.go b/gateways/server/calendar/validate_test.go
new file mode 100644
index 0000000000..002e56c257
--- /dev/null
+++ b/gateways/server/calendar/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package calendar
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEventSourceListener_ValidateEventSource(t *testing.T) {
+ listener := &EventListener{}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "calendar",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("calendar"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "calendar.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.Calendar {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "calendar",
+ Value: content,
+ Type: "calendar",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/common/aws.go b/gateways/server/common/aws/aws.go
similarity index 77%
rename from gateways/common/aws.go
rename to gateways/server/common/aws/aws.go
index 7f0c908050..aab8acd08f 100644
--- a/gateways/common/aws.go
+++ b/gateways/server/common/aws/aws.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package common
+package aws
import (
"github.com/argoproj/argo-events/store"
@@ -54,3 +54,17 @@ func GetAWSSessionWithoutCreds(region string) (*session.Session, error) {
Region: ®ion,
})
}
+
+// CreateAWSSession returns an AWS session based on the credentials settings; if no explicit credentials are configured, it falls back to a session without credentials
+func CreateAWSSession(client kubernetes.Interface, namespace, region string, accessKey *corev1.SecretKeySelector, secretKey *corev1.SecretKeySelector) (*session.Session, error) {
+ if accessKey == nil && secretKey == nil {
+ return GetAWSSessionWithoutCreds(region)
+ }
+
+ creds, err := GetAWSCreds(client, namespace, accessKey, secretKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return GetAWSSession(creds, region)
+}
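A sketch of how a listener is expected to call this helper; the package alias commonaws matches the event source code above, while the namespace and region are placeholders. Passing nil selectors exercises the credentials-free branch (useful with kube2iam-style setups):

package example

import (
	commonaws "github.com/argoproj/argo-events/gateways/server/common/aws"
	snslib "github.com/aws/aws-sdk-go/service/sns"
	"k8s.io/client-go/kubernetes"
)

// newSNSClient shows the two credential modes of CreateAWSSession: nil selectors
// fall back to a session without explicit credentials (e.g. kube2iam / IAM roles),
// while non-nil SecretKeySelectors resolve the keys from a Kubernetes secret in
// the given namespace.
func newSNSClient(k8sClient kubernetes.Interface) (*snslib.SNS, error) {
	awsSession, err := commonaws.CreateAWSSession(k8sClient, "argo-events", "us-east-1", nil, nil)
	if err != nil {
		return nil, err
	}
	return snslib.New(awsSession), nil
}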
diff --git a/gateways/common/aws_test.go b/gateways/server/common/aws/aws_test.go
similarity index 99%
rename from gateways/common/aws_test.go
rename to gateways/server/common/aws/aws_test.go
index 9b08b578df..289f615f11 100644
--- a/gateways/common/aws_test.go
+++ b/gateways/server/common/aws/aws_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package common
+package aws
import (
"testing"
diff --git a/gateways/server/common/fake.go b/gateways/server/common/fake.go
new file mode 100644
index 0000000000..24527a62c2
--- /dev/null
+++ b/gateways/server/common/fake.go
@@ -0,0 +1,41 @@
+package common
+
+import (
+ "context"
+ "github.com/argoproj/argo-events/gateways"
+ "google.golang.org/grpc/metadata"
+)
+
+type FakeGRPCStream struct {
+ SentData *gateways.Event
+ Ctx context.Context
+}
+
+func (f *FakeGRPCStream) Send(event *gateways.Event) error {
+ f.SentData = event
+ return nil
+}
+
+func (f *FakeGRPCStream) SetHeader(metadata.MD) error {
+ return nil
+}
+
+func (f *FakeGRPCStream) SendHeader(metadata.MD) error {
+ return nil
+}
+
+func (f *FakeGRPCStream) SetTrailer(metadata.MD) {
+ return
+}
+
+func (f *FakeGRPCStream) Context() context.Context {
+ return f.Ctx
+}
+
+func (f *FakeGRPCStream) SendMsg(m interface{}) error {
+ return nil
+}
+
+func (f *FakeGRPCStream) RecvMsg(m interface{}) error {
+ return nil
+}
diff --git a/gateways/common/config.go b/gateways/server/common/fsevent/config.go
similarity index 98%
rename from gateways/common/config.go
rename to gateways/server/common/fsevent/config.go
index 212e7aa06f..9a802fc045 100644
--- a/gateways/common/config.go
+++ b/gateways/server/common/fsevent/config.go
@@ -1,4 +1,4 @@
-package common
+package fsevent
import (
"errors"
diff --git a/gateways/common/config_test.go b/gateways/server/common/fsevent/config_test.go
similarity index 98%
rename from gateways/common/config_test.go
rename to gateways/server/common/fsevent/config_test.go
index 78d75d3a5b..eeef933cff 100644
--- a/gateways/common/config_test.go
+++ b/gateways/server/common/fsevent/config_test.go
@@ -1,4 +1,4 @@
-package common
+package fsevent
import (
"testing"
diff --git a/gateways/common/fsevent/fileevent.go b/gateways/server/common/fsevent/fileevent.go
similarity index 100%
rename from gateways/common/fsevent/fileevent.go
rename to gateways/server/common/fsevent/fileevent.go
diff --git a/gateways/common/naivewatcher/mutex.go b/gateways/server/common/naivewatcher/mutex.go
similarity index 100%
rename from gateways/common/naivewatcher/mutex.go
rename to gateways/server/common/naivewatcher/mutex.go
diff --git a/gateways/common/naivewatcher/watcher.go b/gateways/server/common/naivewatcher/watcher.go
similarity index 98%
rename from gateways/common/naivewatcher/watcher.go
rename to gateways/server/common/naivewatcher/watcher.go
index e615e259ad..8affdc2719 100644
--- a/gateways/common/naivewatcher/watcher.go
+++ b/gateways/server/common/naivewatcher/watcher.go
@@ -7,7 +7,7 @@ import (
"sync"
"time"
- "github.com/argoproj/argo-events/gateways/common/fsevent"
+ "github.com/argoproj/argo-events/gateways/server/common/fsevent"
)
const (
diff --git a/gateways/common/naivewatcher/watcher_test.go b/gateways/server/common/naivewatcher/watcher_test.go
similarity index 98%
rename from gateways/common/naivewatcher/watcher_test.go
rename to gateways/server/common/naivewatcher/watcher_test.go
index f79f1a639c..2ed2e02c18 100644
--- a/gateways/common/naivewatcher/watcher_test.go
+++ b/gateways/server/common/naivewatcher/watcher_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"time"
- "github.com/argoproj/argo-events/gateways/common/fsevent"
+ "github.com/argoproj/argo-events/gateways/server/common/fsevent"
"github.com/stretchr/testify/assert"
)
diff --git a/gateways/server/common/webhook/fake.go b/gateways/server/common/webhook/fake.go
new file mode 100644
index 0000000000..3ef30c999e
--- /dev/null
+++ b/gateways/server/common/webhook/fake.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "net/http"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+)
+
+var Hook = &Context{
+ Endpoint: "/fake",
+ Port: "12000",
+ URL: "test-url",
+}
+
+type FakeHttpWriter struct {
+ HeaderStatus int
+ Payload []byte
+}
+
+func (f *FakeHttpWriter) Header() http.Header {
+ return http.Header{}
+}
+
+func (f *FakeHttpWriter) Write(body []byte) (int, error) {
+ f.Payload = body
+ return len(body), nil
+}
+
+func (f *FakeHttpWriter) WriteHeader(status int) {
+ f.HeaderStatus = status
+}
+
+type FakeRouter struct {
+ route *Route
+}
+
+func (f *FakeRouter) GetRoute() *Route {
+ return f.route
+}
+
+func (f *FakeRouter) HandleRoute(writer http.ResponseWriter, request *http.Request) {
+}
+
+func (f *FakeRouter) PostActivate() error {
+ return nil
+}
+
+func (f *FakeRouter) PostInactivate() error {
+ return nil
+}
+
+func GetFakeRoute() *Route {
+ logger := common.NewArgoEventsLogger()
+ return NewRoute(Hook, logger, &gateways.EventSource{
+ Name: "fake-event-source",
+ Value: []byte("hello"),
+ Id: "123",
+ })
+}
diff --git a/gateways/server/common/webhook/types.go b/gateways/server/common/webhook/types.go
new file mode 100644
index 0000000000..e0d5348136
--- /dev/null
+++ b/gateways/server/common/webhook/types.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "net/http"
+ "sync"
+
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/gorilla/mux"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // Lock synchronizes access to ActiveServerHandlers
+ Lock sync.Mutex
+)
+
+// Router is an interface to manage the route
+type Router interface {
+ // GetRoute returns the route
+ GetRoute() *Route
+ // HandleRoute processes the incoming requests on the route
+ HandleRoute(writer http.ResponseWriter, request *http.Request)
+ // PostActivate captures any operations to run after the route has been activated and is ready to process requests.
+ PostActivate() error
+ // PostInactivate captures any cleanup operations to run after the route has been deactivated
+ PostInactivate() error
+}
+
+// Route contains general information about a route
+type Route struct {
+ // Context refers to the webhook context
+ Context *Context
+ // Logger to log stuff
+ Logger *logrus.Logger
+ // StartCh is used to signal that the route is ready to start processing requests
+ StartCh chan struct{}
+ // EventSource refers to gateway event source
+ EventSource *gateways.EventSource
+ // Active determines whether the route is active and ready to process incoming requests,
+ // or whether it is inactive
+ Active bool
+ // DataCh is the channel on which payloads received on this endpoint are dispatched
+ DataCh chan []byte
+ // initialized indicates whether the route has been initialized and an HTTP handler
+ // has been registered to process incoming requests
+ initialized bool
+}
+
+// Controller controls the active servers and endpoints
+type Controller struct {
+ // ActiveServerHandlers keeps track of currently active mux/router for the http servers.
+ ActiveServerHandlers map[string]*mux.Router
+ // ActiveRoutes keeps track of routes that are already registered with a server and whether they are active or inactive
+ ActiveRoutes map[string]*Route
+ // RouteActivateChan handles activation of routes
+ RouteActivateChan chan Router
+ // RouteDeactivateChan handles deactivation of routes
+ RouteDeactivateChan chan Router
+}
+
+// Context holds a general purpose REST API context
+type Context struct {
+ // REST API endpoint
+ Endpoint string `json:"endpoint" protobuf:"bytes,1,name=endpoint"`
+ // Method is HTTP request method that indicates the desired action to be performed for a given resource.
+ // See RFC7231 Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content
+ Method string `json:"method" protobuf:"bytes,2,name=method"`
+ // Port on which HTTP server is listening for incoming events.
+ Port string `json:"port" protobuf:"bytes,3,name=port"`
+ // URL is the url of the server.
+ URL string `json:"url" protobuf:"bytes,4,name=url"`
+ // ServerCertPath refers to the file that contains the cert.
+ ServerCertPath string `json:"serverCertPath,omitempty" protobuf:"bytes,4,opt,name=serverCertPath"`
+ // ServerKeyPath refers to the file that contains the private key
+ ServerKeyPath string `json:"serverKeyPath,omitempty" protobuf:"bytes,5,opt,name=serverKeyPath"`
+}
diff --git a/gateways/server/common/webhook/validate.go b/gateways/server/common/webhook/validate.go
new file mode 100644
index 0000000000..63f7b8f84c
--- /dev/null
+++ b/gateways/server/common/webhook/validate.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// ValidateWebhookContext validates a webhook context
+func ValidateWebhookContext(context *Context) error {
+ if context == nil {
+ return fmt.Errorf("webhook context can't be nil")
+ }
+ if context.Endpoint == "" {
+ return fmt.Errorf("endpoint can't be empty")
+ }
+ if context.Port == "" {
+ return fmt.Errorf("port can't be empty")
+ }
+ if _, err := strconv.Atoi(context.Port); err != nil {
+ return fmt.Errorf("failed to parse server port %s. err: %+v", context.Port, err)
+ }
+ return nil
+}
+
+// validateRoute validates a route
+func validateRoute(r *Route) error {
+ if r == nil {
+ return fmt.Errorf("route can't be nil")
+ }
+ if r.Context == nil {
+ return fmt.Errorf("webhook can't be nil")
+ }
+ if r.StartCh == nil {
+ return fmt.Errorf("start channel can't be nil")
+ }
+ if r.EventSource == nil {
+ return fmt.Errorf("event source can't be nil")
+ }
+ if r.Logger == nil {
+ return fmt.Errorf("logger can't be nil")
+ }
+ return nil
+}
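A quick sketch of what the context validator accepts and rejects; the values are illustrative:

package example

import (
	"fmt"

	"github.com/argoproj/argo-events/gateways/server/common/webhook"
)

func checkContexts() {
	// a complete context: endpoint and a numeric port are both set
	ok := &webhook.Context{Endpoint: "/notifications", Method: "POST", Port: "12000", URL: "http://gateway.example.com"}
	fmt.Println(webhook.ValidateWebhookContext(ok)) // prints <nil>

	// a non-numeric port fails the strconv.Atoi check
	bad := &webhook.Context{Endpoint: "/notifications", Method: "POST", Port: "http"}
	fmt.Println(webhook.ValidateWebhookContext(bad)) // prints a "failed to parse server port" error
}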
diff --git a/gateways/server/common/webhook/webhook.go b/gateways/server/common/webhook/webhook.go
new file mode 100644
index 0000000000..4a8100b1e8
--- /dev/null
+++ b/gateways/server/common/webhook/webhook.go
@@ -0,0 +1,193 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "fmt"
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/gorilla/mux"
+ "github.com/sirupsen/logrus"
+ "net/http"
+)
+
+// NewController returns a webhook controller
+func NewController() *Controller {
+ return &Controller{
+ ActiveRoutes: make(map[string]*Route),
+ ActiveServerHandlers: make(map[string]*mux.Router),
+ RouteActivateChan: make(chan Router),
+ RouteDeactivateChan: make(chan Router),
+ }
+}
+
+// NewRoute returns a vanilla route
+func NewRoute(hookContext *Context, logger *logrus.Logger, eventSource *gateways.EventSource) *Route {
+ return &Route{
+ Context: hookContext,
+ Logger: logger,
+ EventSource: eventSource,
+ Active: false,
+ DataCh: make(chan []byte),
+ StartCh: make(chan struct{}),
+ }
+}
+
+// ProcessRouteStatus processes route activation and deactivation requests.
+func ProcessRouteStatus(ctrl *Controller) {
+ for {
+ select {
+ case router := <-ctrl.RouteActivateChan:
+ // start server if it has not been started on this port
+ startServer(router, ctrl)
+ // allow the route to process incoming requests
+ router.GetRoute().StartCh <- struct{}{}
+
+ case router := <-ctrl.RouteDeactivateChan:
+ router.GetRoute().Active = false
+ }
+ }
+}
+
+// startServer starts an HTTP server for the route's port if one is not already running
+func startServer(router Router, controller *Controller) {
+ // start an HTTP server only if no other configuration previously started a server on the given port
+ Lock.Lock()
+ route := router.GetRoute()
+ if _, ok := controller.ActiveServerHandlers[route.Context.Port]; !ok {
+ handler := mux.NewRouter()
+ server := &http.Server{
+ Addr: fmt.Sprintf(":%s", route.Context.Port),
+ Handler: handler,
+ }
+
+ controller.ActiveServerHandlers[route.Context.Port] = handler
+
+ // start http server
+ go func() {
+ var err error
+ if route.Context.ServerCertPath == "" || route.Context.ServerKeyPath == "" {
+ err = server.ListenAndServe()
+ } else {
+ err = server.ListenAndServeTLS(route.Context.ServerCertPath, route.Context.ServerKeyPath)
+ }
+ route.Logger.WithField(common.LabelEventSource, route.EventSource.Name).WithError(err).Error("http server stopped")
+ if err != nil {
+ route.Logger.WithError(err).WithField("port", route.Context.Port).Errorln("failed to listen and serve")
+ }
+ }()
+ }
+
+ // if the route has not been initialized previously, register its handler with the server's router
+ if !route.initialized {
+ handler := controller.ActiveServerHandlers[route.Context.Port]
+ handler.HandleFunc(route.Context.Endpoint, router.HandleRoute).Methods(route.Context.Method)
+ }
+
+ Lock.Unlock()
+}
+
+// activateRoute activates a route to process incoming requests
+func activateRoute(router Router, controller *Controller) {
+ route := router.GetRoute()
+ endpoint := route.Context.Endpoint
+ // mark the route as active
+ controller.RouteActivateChan <- router
+
+ // wait for the route to become ready
+ // if this is the first route added for a server, the controller will
+ // start an HTTP server before marking the route as ready
+ <-route.StartCh
+
+ log := route.Logger.WithFields(
+ map[string]interface{}{
+ common.LabelEventSource: route.EventSource.Name,
+ common.LabelPort: route.Context.Port,
+ common.LabelEndpoint: endpoint,
+ })
+
+ log.Info("activating the route...")
+ route.Active = true
+ log.Info("route is activated")
+}
+
+// manageRouteStream consumes data from the route's data channel and stops processing when the event source is stopped or removed
+func manageRouteStream(router Router, controller *Controller, eventStream gateways.Eventing_StartEventSourceServer) error {
+ route := router.GetRoute()
+
+ for {
+ select {
+ case data := <-route.DataCh:
+ route.Logger.WithField(common.LabelEventSource, route.EventSource.Name).Info("new event received, dispatching to gateway client")
+ err := eventStream.Send(&gateways.Event{
+ Name: route.EventSource.Name,
+ Payload: data,
+ })
+ if err != nil {
+ route.Logger.WithField(common.LabelEventSource, route.EventSource.Name).WithError(err).Error("failed to send event")
+ return err
+ }
+
+ case <-eventStream.Context().Done():
+ route.Logger.WithField(common.LabelEventSource, route.EventSource.Name).Info("connection is closed by client")
+ controller.RouteDeactivateChan <- router
+ return nil
+ }
+ }
+}
+
+// ManageRoute manages the lifecycle of a route
+func ManageRoute(router Router, controller *Controller, eventStream gateways.Eventing_StartEventSourceServer) error {
+ route := router.GetRoute()
+
+ logger := route.Logger.WithField(common.LabelEventSource, route.EventSource.Name)
+
+ // to process a route, it needs to go through
+ // 1. validation - basic configuration checks
+ // 2. activation - associate an http handler if not done previously
+ // 3. post-activation operations - operations that must be performed after the route has been activated and is ready to process requests
+ // 4. consumption of data from the route's data channel
+ // 5. post-deactivation operations - operations that must be performed after the route is deactivated
+
+ logger.Info("validating the route...")
+ if err := validateRoute(router.GetRoute()); err != nil {
+ logger.WithError(err).Error("route is invalid, won't initialize it")
+ return err
+ }
+
+ logger.Info("activating the route...")
+ activateRoute(router, controller)
+
+ logger.Info("running operations post route activation...")
+ if err := router.PostActivate(); err != nil {
+ logger.WithError(err).Error("error occurred while performing post route activation operations")
+ return err
+ }
+
+ logger.Info("listening to payloads for the route...")
+ if err := manageRouteStream(router, controller, eventStream); err != nil {
+ logger.WithError(err).Error("error occurred in consuming payload from the route")
+ return err
+ }
+
+ logger.Info("running operations post route inactivation...")
+ if err := router.PostInactivate(); err != nil {
+ logger.WithError(err).Error("error occurred while running operations post route inactivation")
+ }
+
+ return nil
+}
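Putting the pieces together, an HTTP-based event source wires the controller and ManageRoute roughly as sketched below. The package-level controller and the ProcessRouteStatus goroutine are assumptions about how the surrounding gateway server is set up (the SNS listener earlier in this diff references a controller variable the same way); the webhook context values are placeholders:

package example

import (
	"github.com/argoproj/argo-events/common"
	"github.com/argoproj/argo-events/gateways"
	"github.com/argoproj/argo-events/gateways/server/common/webhook"
)

// controller is shared by all routes of this gateway so that routes on the
// same port reuse a single HTTP server.
var controller = webhook.NewController()

func init() {
	// process activation/deactivation requests for all routes
	go webhook.ProcessRouteStatus(controller)
}

// startRoute sketches what an event source's StartEventSource typically does:
// build a Route, wrap it in the event source's Router implementation
// (e.g. the SNS Router), and hand everything to ManageRoute, which drives
// the lifecycle described above.
func startRoute(makeRouter func(*webhook.Route) webhook.Router, eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
	// in a real event source the context comes from the parsed event source spec
	context := &webhook.Context{Endpoint: "/example", Method: "POST", Port: "12000"}
	route := webhook.NewRoute(context, common.NewArgoEventsLogger(), eventSource)
	return webhook.ManageRoute(makeRouter(route), controller, eventStream)
}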
diff --git a/gateways/core/webhook/config_test.go b/gateways/server/common/webhook/webhook_test.go
similarity index 61%
rename from gateways/core/webhook/config_test.go
rename to gateways/server/common/webhook/webhook_test.go
index 044285ae78..688ab9c3e9 100644
--- a/gateways/core/webhook/config_test.go
+++ b/gateways/server/common/webhook/webhook_test.go
@@ -19,22 +19,22 @@ package webhook
import (
"testing"
- "github.com/argoproj/argo-events/gateways/common"
"github.com/smartystreets/goconvey/convey"
)
-var es = `
-endpoint: "/bar"
-port: "10000"
-method: "POST"
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a webhook event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*common.Webhook)
- convey.So(ok, convey.ShouldEqual, true)
+var rc = &FakeRouter{
+ route: GetFakeRoute(),
+}
+
+func TestValidateWebhook(t *testing.T) {
+ convey.Convey("Given a webhook, validate it", t, func() {
+ convey.So(ValidateWebhookContext(Hook), convey.ShouldBeNil)
+ })
+}
+
+func TestNewWebhookHelper(t *testing.T) {
+ convey.Convey("Make sure webhook helper is not empty", t, func() {
+ controller := NewController()
+ convey.So(controller, convey.ShouldNotBeNil)
})
}
diff --git a/gateways/core/file/Dockerfile b/gateways/server/file/Dockerfile
similarity index 100%
rename from gateways/core/file/Dockerfile
rename to gateways/server/file/Dockerfile
diff --git a/gateways/core/stream/nats/cmd/main.go b/gateways/server/file/cmd/main.go
similarity index 76%
rename from gateways/core/stream/nats/cmd/main.go
rename to gateways/server/file/cmd/main.go
index 82bbb71ae2..14ab2ea09a 100644
--- a/gateways/core/stream/nats/cmd/main.go
+++ b/gateways/server/file/cmd/main.go
@@ -18,12 +18,12 @@ package main
import (
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/core/stream/nats"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/file"
)
func main() {
- gateways.StartGateway(&nats.NatsEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
+ server.StartGateway(&file.EventListener{
+ Logger: common.NewArgoEventsLogger(),
})
}
diff --git a/gateways/server/file/start.go b/gateways/server/file/start.go
new file mode 100644
index 0000000000..ed0d982100
--- /dev/null
+++ b/gateways/server/file/start.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package file
+
+import (
+ "encoding/json"
+ "regexp"
+ "strings"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/common/fsevent"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/fsnotify/fsnotify"
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// EventListener implements Eventing for file event source
+type EventListener struct {
+ // Logger to log stuff
+ Logger *logrus.Logger
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ log := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+ log.Info("started processing event source...")
+
+ dataCh := make(chan []byte)
+ errorCh := make(chan error)
+ doneCh := make(chan struct{}, 1)
+
+ go listener.listenEvents(eventSource, dataCh, errorCh, doneCh)
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
+}
+
+// listenEvents listens to file-related events
+func (listener *EventListener) listenEvents(eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+ defer server.Recover(eventSource.Name)
+
+ var fileEventSource *v1alpha1.FileEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &fileEventSource); err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ // create new fs watcher
+ logger.Infoln("setting up a new file watcher...")
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ defer watcher.Close()
+
+ // the directory to watch must exist in the file system; you can't watch a path that is not present
+ logger.Infoln("adding directory to monitor for the watcher...")
+ err = watcher.Add(fileEventSource.WatchPathConfig.Directory)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ var pathRegexp *regexp.Regexp
+ if fileEventSource.WatchPathConfig.PathRegexp != "" {
+ logger.WithField("regex", fileEventSource.WatchPathConfig.PathRegexp).Infoln("matching file path with configured regex...")
+ pathRegexp, err = regexp.Compile(fileEventSource.WatchPathConfig.PathRegexp)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ }
+
+ logger.Info("listening to file notifications...")
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok {
+ logger.Info("fs watcher has stopped")
+ // watcher stopped watching file events
+ errorCh <- errors.New("fs watcher stopped")
+ return
+ }
+ // match the event path against the configured path or regex so that events are not sent for unrelated files such as editor .swp files
+ matched := false
+ relPath := strings.TrimPrefix(event.Name, fileEventSource.WatchPathConfig.Directory)
+ if fileEventSource.WatchPathConfig.Path != "" && fileEventSource.WatchPathConfig.Path == relPath {
+ matched = true
+ } else if pathRegexp != nil && pathRegexp.MatchString(relPath) {
+ matched = true
+ }
+ if matched && fileEventSource.EventType == event.Op.String() {
+ logger.WithFields(
+ map[string]interface{}{
+ "event-type": event.Op.String(),
+ "descriptor-name": event.Name,
+ },
+ ).Infoln("file event")
+
+ // Assume the fsnotify event has the same Op spec as our file event
+ fileEvent := fsevent.Event{Name: event.Name, Op: fsevent.NewOp(event.Op.String())}
+ payload, err := json.Marshal(fileEvent)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ logger.WithFields(
+ map[string]interface{}{
+ "event-type": event.Op.String(),
+ "descriptor-name": event.Name,
+ },
+ ).Infoln("dispatching file event on data channel...")
+ dataCh <- payload
+ }
+ case err := <-watcher.Errors:
+ errorCh <- err
+ return
+ case <-doneCh:
+ return
+ }
+ }
+}
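The filtering above can be looked at in isolation: the watcher reports the full event name, the configured directory prefix is trimmed off, and the remainder is compared against either an exact path or a regular expression. A small sketch of just that decision, with illustrative values:

package example

import (
	"regexp"
	"strings"
)

// matchesWatchPath mirrors the filtering in listenEvents: an exact path match wins,
// otherwise fall back to the optional regular expression.
func matchesWatchPath(eventName, directory, path string, pathRegexp *regexp.Regexp) bool {
	relPath := strings.TrimPrefix(eventName, directory)
	if path != "" && path == relPath {
		return true
	}
	if pathRegexp != nil && pathRegexp.MatchString(relPath) {
		return true
	}
	return false
}

// example: matchesWatchPath("/data/input/report.csv", "/data/input/", "", regexp.MustCompile(`\.csv$`)) returns true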
diff --git a/gateways/server/file/validate.go b/gateways/server/file/validate.go
new file mode 100644
index 0000000000..408ed85da2
--- /dev/null
+++ b/gateways/server/file/validate.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package file
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates file event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.FileEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.FileEvent)),
+ }, nil
+ }
+
+ var fileEventSource *v1alpha1.FileEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &fileEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(fileEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ Reason: err.Error(),
+ IsValid: false,
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(fileEventSource *v1alpha1.FileEventSource) error {
+ if fileEventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if fileEventSource.EventType == "" {
+ return fmt.Errorf("type must be specified")
+ }
+ err := fileEventSource.WatchPathConfig.Validate()
+ return err
+}
diff --git a/gateways/server/file/validate_test.go b/gateways/server/file/validate_test.go
new file mode 100644
index 0000000000..26725bae21
--- /dev/null
+++ b/gateways/server/file/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package file
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEventSourceListener_ValidateEventSource(t *testing.T) {
+ listener := &EventListener{}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "file",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("file"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "file.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.File {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "file",
+ Value: content,
+ Type: "file",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/community/gcp-pubsub/Dockerfile b/gateways/server/gcp-pubsub/Dockerfile
similarity index 100%
rename from gateways/community/gcp-pubsub/Dockerfile
rename to gateways/server/gcp-pubsub/Dockerfile
diff --git a/gateways/core/calendar/cmd/main.go b/gateways/server/gcp-pubsub/cmd/main.go
similarity index 76%
rename from gateways/core/calendar/cmd/main.go
rename to gateways/server/gcp-pubsub/cmd/main.go
index e8c21b3c1e..342d163091 100644
--- a/gateways/core/calendar/cmd/main.go
+++ b/gateways/server/gcp-pubsub/cmd/main.go
@@ -18,12 +18,12 @@ package main
import (
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/core/calendar"
+ "github.com/argoproj/argo-events/gateways/server"
+ pubsub "github.com/argoproj/argo-events/gateways/server/gcp-pubsub"
)
func main() {
- gateways.StartGateway(&calendar.CalendarEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
+ server.StartGateway(&pubsub.EventListener{
+ Logger: common.NewArgoEventsLogger(),
})
}
diff --git a/gateways/server/gcp-pubsub/start.go b/gateways/server/gcp-pubsub/start.go
new file mode 100644
index 0000000000..471dd32575
--- /dev/null
+++ b/gateways/server/gcp-pubsub/start.go
@@ -0,0 +1,156 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pubsub
+
+import (
+ "context"
+ "fmt"
+
+ "cloud.google.com/go/pubsub"
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
+ "google.golang.org/api/option"
+)
+
+// EventListener implements Eventing for gcp pub-sub event source
+type EventListener struct {
+ // Logger to log stuff
+ Logger *logrus.Logger
+}
+
+// StartEventSource starts processing the GCP PubSub event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ defer server.Recover(eventSource.Name)
+
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("started processing the event source...")
+
+ ctx := eventStream.Context()
+
+ dataCh := make(chan []byte)
+ errorCh := make(chan error)
+ doneCh := make(chan struct{}, 1)
+
+ go listener.listenEvents(ctx, eventSource, dataCh, errorCh, doneCh)
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
+}
+
+// listenEvents listens to GCP PubSub events
+func (listener *EventListener) listenEvents(ctx context.Context, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+	// In order to listen to events from GCP PubSub:
+ // 1. Parse the event source that contains configuration to connect to GCP PubSub
+ // 2. Create a new PubSub client
+ // 3. Create the topic if one doesn't exist already
+ // 4. Create a subscription if one doesn't exist already.
+ // 5. Start listening to messages on the queue
+	// 6. Once the event source is stopped, clean up: delete the subscription if configured to do so, then close the PubSub client
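+	//
+	// A minimal sketch of steps 2-5 with the cloud.google.com/go/pubsub client
+	// (project, topic and subscription names are placeholders; error handling omitted):
+	//
+	//   client, _ := pubsub.NewClient(ctx, "my-project")
+	//   topic := client.Topic("my-topic")
+	//   sub, _ := client.CreateSubscription(ctx, "my-sub", pubsub.SubscriptionConfig{Topic: topic})
+	//   _ = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { m.Ack() })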
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ logger.Infoln("parsing PubSub event source...")
+ var pubsubEventSource *v1alpha1.PubSubEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &pubsubEventSource); err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger = logger.WithField("topic", pubsubEventSource.Topic)
+
+ logger.Infoln("setting up a client to connect to PubSub...")
+
+	// Create a PubSub client for the configured project and credentials
+ client, err := pubsub.NewClient(ctx, pubsubEventSource.ProjectID, option.WithCredentialsFile(pubsubEventSource.CredentialsFile))
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ // use same client for topic and subscription by default
+ topicClient := client
+ if pubsubEventSource.TopicProjectID != "" && pubsubEventSource.TopicProjectID != pubsubEventSource.ProjectID {
+ topicClient, err = pubsub.NewClient(ctx, pubsubEventSource.TopicProjectID, option.WithCredentialsFile(pubsubEventSource.CredentialsFile))
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ }
+
+ logger.Infoln("getting topic information from PubSub...")
+ topic := topicClient.Topic(pubsubEventSource.Topic)
+ exists, err := topic.Exists(ctx)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ if !exists {
+ logger.Infoln("topic doesn't exist, creating the GCP PubSub topic...")
+ if _, err := topicClient.CreateTopic(ctx, pubsubEventSource.Topic); err != nil {
+ errorCh <- err
+ return
+ }
+ }
+
+ subscriptionName := fmt.Sprintf("%s-%s", eventSource.Name, eventSource.Id)
+
+ logger = logger.WithField("subscription", subscriptionName)
+
+ logger.Infoln("subscribing to PubSub topic...")
+ subscription := client.Subscription(subscriptionName)
+ exists, err = subscription.Exists(ctx)
+
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ if exists {
+ logger.Warnln("using an existing subscription...")
+ } else {
+ logger.Infoln("creating a new subscription...")
+ if _, err := client.CreateSubscription(ctx, subscriptionName, pubsub.SubscriptionConfig{Topic: topic}); err != nil {
+ errorCh <- err
+ return
+ }
+ }
+
+ logger.Infoln("listening for messages from PubSub...")
+ err = subscription.Receive(ctx, func(msgCtx context.Context, m *pubsub.Message) {
+ logger.Info("received GCP PubSub Message from topic")
+ dataCh <- m.Data
+ m.Ack()
+ })
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ <-doneCh
+
+ if pubsubEventSource.DeleteSubscriptionOnFinish {
+ logger.Info("deleting PubSub subscription...")
+ if err = subscription.Delete(context.Background()); err != nil {
+ logger.WithError(err).Errorln("failed to delete the PubSub subscription")
+ }
+ }
+
+ logger.Info("closing PubSub client...")
+ if err = client.Close(); err != nil {
+ logger.WithError(err).Errorln("failed to close the PubSub client")
+ }
+}
diff --git a/gateways/server/gcp-pubsub/validate.go b/gateways/server/gcp-pubsub/validate.go
new file mode 100644
index 0000000000..3bf4c758b6
--- /dev/null
+++ b/gateways/server/gcp-pubsub/validate.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pubsub
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates gateway event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.PubSubEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.PubSubEvent)),
+ }, nil
+ }
+
+ var pubsubEventSource *v1alpha1.PubSubEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &pubsubEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(pubsubEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ Reason: err.Error(),
+ IsValid: false,
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.PubSubEventSource) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if eventSource.ProjectID == "" {
+ return fmt.Errorf("must specify projectId")
+ }
+ if eventSource.Topic == "" {
+ return fmt.Errorf("must specify topic")
+ }
+ if eventSource.CredentialsFile != "" {
+ if _, err := os.Stat(eventSource.CredentialsFile); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/gateways/server/gcp-pubsub/validate_test.go b/gateways/server/gcp-pubsub/validate_test.go
new file mode 100644
index 0000000000..5cf2284e00
--- /dev/null
+++ b/gateways/server/gcp-pubsub/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pubsub
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEventListener_ValidateEventSource(t *testing.T) {
+ listener := &EventListener{}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "pubsub",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("pubsub"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "gcp-pubsub.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.PubSub {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "pubsub",
+ Value: content,
+ Type: "pubsub",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/community/github/Dockerfile b/gateways/server/github/Dockerfile
similarity index 100%
rename from gateways/community/github/Dockerfile
rename to gateways/server/github/Dockerfile
diff --git a/gateways/community/github/cmd/main.go b/gateways/server/github/cmd/main.go
similarity index 76%
rename from gateways/community/github/cmd/main.go
rename to gateways/server/github/cmd/main.go
index 5488e25db8..50a39718a9 100644
--- a/gateways/community/github/cmd/main.go
+++ b/gateways/server/github/cmd/main.go
@@ -17,11 +17,12 @@ limitations under the License.
package main
import (
+ "os"
+
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/community/github"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/github"
"k8s.io/client-go/kubernetes"
- "os"
)
func main() {
@@ -31,13 +32,13 @@ func main() {
panic(err)
}
clientset := kubernetes.NewForConfigOrDie(restConfig)
- namespace, ok := os.LookupEnv(common.EnvVarGatewayNamespace)
+ namespace, ok := os.LookupEnv(common.EnvVarNamespace)
if !ok {
panic("namespace is not provided")
}
- gateways.StartGateway(&github.GithubEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
+ server.StartGateway(&github.EventListener{
+ Logger: common.NewArgoEventsLogger(),
Namespace: namespace,
- Clientset: clientset,
+ K8sClient: clientset,
})
}
diff --git a/gateways/community/github/hook_util.go b/gateways/server/github/hook_util.go
similarity index 100%
rename from gateways/community/github/hook_util.go
rename to gateways/server/github/hook_util.go
diff --git a/gateways/community/github/hook_util_test.go b/gateways/server/github/hook_util_test.go
similarity index 100%
rename from gateways/community/github/hook_util_test.go
rename to gateways/server/github/hook_util_test.go
index 595e88ad85..e474cc3377 100644
--- a/gateways/community/github/hook_util_test.go
+++ b/gateways/server/github/hook_util_test.go
@@ -1,9 +1,9 @@
package github
import (
- "testing"
- "github.com/stretchr/testify/assert"
gh "github.com/google/go-github/github"
+ "github.com/stretchr/testify/assert"
+ "testing"
)
func TestSliceEqual(t *testing.T) {
diff --git a/gateways/server/github/start.go b/gateways/server/github/start.go
new file mode 100644
index 0000000000..6bc3ca48c7
--- /dev/null
+++ b/gateways/server/github/start.go
@@ -0,0 +1,299 @@
+/*
+Copyright 2018 KompiTech GmbH
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package github
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/argoproj/argo-events/common"
+	"github.com/argoproj/argo-events/gateways"
+	"github.com/argoproj/argo-events/gateways/server"
+	"github.com/argoproj/argo-events/gateways/server/common/webhook"
+	"github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+	"github.com/argoproj/argo-events/store"
+	"github.com/ghodss/yaml"
+	gh "github.com/google/go-github/github"
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// GitHub headers
+const (
+ githubEventHeader = "X-GitHub-Event"
+ githubDeliveryHeader = "X-GitHub-Delivery"
+)
+
+// controller controls the webhook operations
+var (
+ controller = webhook.NewController()
+)
+
+// set up the activation and inactivation channels to control the state of routes.
+func init() {
+ go webhook.ProcessRouteStatus(controller)
+}
+
+// getCredentials retrieves the credentials for the GitHub connection
+func (router *Router) getCredentials(keySelector *corev1.SecretKeySelector, namespace string) (*cred, error) {
+ token, err := store.GetSecrets(router.k8sClient, namespace, keySelector.Name, keySelector.Key)
+ if err != nil {
+ return nil, err
+ }
+ return &cred{
+ secret: token,
+ }, nil
+}
+
+// Implement Router
+// 1. GetRoute
+// 2. HandleRoute
+// 3. PostActivate
+// 4. PostInactivate
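+//
+// These four methods are what webhook.ManageRoute (invoked from StartEventSource below)
+// appears to expect of a Router; the shared controller above tracks each route's
+// activation state.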
+
+// GetRoute returns the route
+func (router *Router) GetRoute() *webhook.Route {
+ return router.route
+}
+
+// HandleRoute handles incoming requests on the route
+func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) {
+ route := router.route
+
+ logger := route.Logger.WithFields(
+ map[string]interface{}{
+ common.LabelEventSource: route.EventSource.Name,
+ common.LabelEndpoint: route.Context.Endpoint,
+ common.LabelPort: route.Context.Port,
+ })
+
+ logger.Info("received a request, processing it...")
+
+ if !route.Active {
+ logger.Info("endpoint is not active, won't process the request")
+ common.SendErrorResponse(writer, "endpoint is inactive")
+ return
+ }
+
+ hook := router.hook
+ secret := ""
+ if s, ok := hook.Config["secret"]; ok {
+ secret = s.(string)
+ }
+
+ body, err := parseValidateRequest(request, []byte(secret))
+ if err != nil {
+ logger.WithError(err).Error("request is not valid event notification, discarding it")
+ common.SendErrorResponse(writer, err.Error())
+ return
+ }
+
+ logger.Infoln("dispatching event on route's data channel")
+ route.DataCh <- body
+ logger.Info("request successfully processed")
+
+ common.SendSuccessResponse(writer, "success")
+}
+
+// PostActivate performs operations once the route is activated and ready to consume requests
+func (router *Router) PostActivate() error {
+	// In order to successfully set up a GitHub hook for the given repository:
+ // 1. Get the API Token and Webhook secret from K8s secrets
+ // 2. Configure the hook with url, content type, ssl etc.
+ // 3. Set up a GitHub client
+ // 4. Set the base and upload url for the client
+	// 5. Create the hook if one doesn't already exist; if it does, reuse the existing hook.
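+	//
+	// As a rough illustration (placeholder values), for Webhook.URL "http://gateway.example.com"
+	// and Endpoint "/push" the hook config assembled below ends up shaped like
+	//   {"url": "http://gateway.example.com/push", "content_type": "json", "insecure_ssl": "0", "secret": "<webhook secret>"}
+	// where content_type and secret are only set when configured.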
+
+ route := router.route
+ githubEventSource := router.githubEventSource
+
+ logger := route.Logger.WithFields(map[string]interface{}{
+ common.LabelEventSource: route.EventSource.Name,
+ "repository": githubEventSource.Repository,
+ })
+
+ logger.Infoln("retrieving api token credentials...")
+ apiTokenCreds, err := router.getCredentials(githubEventSource.APIToken, githubEventSource.Namespace)
+ if err != nil {
+ return errors.Errorf("failed to retrieve api token credentials. err: %+v", err)
+ }
+
+ logger.Infoln("setting up auth with api token...")
+ PATTransport := TokenAuthTransport{
+ Token: apiTokenCreds.secret,
+ }
+
+ logger.Infoln("configuring GitHub hook...")
+ formattedUrl := common.FormattedURL(githubEventSource.Webhook.URL, githubEventSource.Webhook.Endpoint)
+ hookConfig := map[string]interface{}{
+ "url": &formattedUrl,
+ }
+
+ if githubEventSource.ContentType != "" {
+ hookConfig["content_type"] = githubEventSource.ContentType
+ }
+
+ if githubEventSource.Insecure {
+ hookConfig["insecure_ssl"] = "1"
+ } else {
+ hookConfig["insecure_ssl"] = "0"
+ }
+
+ logger.Infoln("retrieving webhook secret credentials...")
+ if githubEventSource.WebhookSecret != nil {
+ webhookSecretCreds, err := router.getCredentials(githubEventSource.WebhookSecret, githubEventSource.Namespace)
+ if err != nil {
+ return errors.Errorf("failed to retrieve webhook secret. err: %+v", err)
+ }
+ hookConfig["secret"] = webhookSecretCreds.secret
+ }
+
+ router.hook = &gh.Hook{
+ Events: githubEventSource.Events,
+ Active: gh.Bool(githubEventSource.Active),
+ Config: hookConfig,
+ }
+
+ logger.Infoln("setting up client for GitHub...")
+ router.githubClient = gh.NewClient(PATTransport.Client())
+
+ logger.Infoln("setting up base url for GitHub client...")
+ if githubEventSource.GithubBaseURL != "" {
+ baseURL, err := url.Parse(githubEventSource.GithubBaseURL)
+ if err != nil {
+ return errors.Errorf("failed to parse github base url. err: %s", err)
+ }
+ router.githubClient.BaseURL = baseURL
+ }
+
+ logger.Infoln("setting up the upload url for GitHub client...")
+ if githubEventSource.GithubUploadURL != "" {
+ uploadURL, err := url.Parse(githubEventSource.GithubUploadURL)
+ if err != nil {
+ return errors.Errorf("failed to parse github upload url. err: %s", err)
+ }
+ router.githubClient.UploadURL = uploadURL
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ logger.Infoln("creating a GitHub hook for the repository...")
+ hook, _, err := router.githubClient.Repositories.CreateHook(ctx, githubEventSource.Owner, githubEventSource.Repository, router.hook)
+ if err != nil {
+ // Continue if error is because hook already exists
+ er, ok := err.(*gh.ErrorResponse)
+ if !ok || er.Response.StatusCode != http.StatusUnprocessableEntity {
+ return errors.Errorf("failed to create webhook. err: %+v", err)
+ }
+ }
+
+	// if the hook already exists, CreateHook returns a nil hook
+ if hook == nil {
+ logger.Infoln("GitHub hook for the repository already exists, trying to use the existing hook...")
+ ctx, cancel = context.WithTimeout(context.Background(), 20*time.Second)
+ defer cancel()
+
+ hooks, _, err := router.githubClient.Repositories.ListHooks(ctx, githubEventSource.Owner, githubEventSource.Repository, nil)
+ if err != nil {
+ return errors.Errorf("failed to list existing webhooks. err: %+v", err)
+ }
+
+ hook = getHook(hooks, formattedUrl, githubEventSource.Events)
+ if hook == nil {
+ return errors.New("failed to find existing webhook")
+ }
+ }
+
+ if githubEventSource.WebhookSecret != nil {
+		// As the secret in the returned hook config is masked with asterisks (*), replace it with the unmasked secret.
+ hook.Config["secret"] = hookConfig["secret"]
+ }
+
+ router.hook = hook
+ logger.Infoln("GitHub hook has been successfully set for the repository")
+
+ return nil
+}
+
+// PostInactivate performs operations after the route is inactivated
+func (router *Router) PostInactivate() error {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ githubEventSource := router.githubEventSource
+
+ if githubEventSource.DeleteHookOnFinish {
+ logger := router.route.Logger.WithFields(map[string]interface{}{
+ common.LabelEventSource: router.route.EventSource.Name,
+ "repository": githubEventSource.Repository,
+ "hook-id": *router.hook.ID,
+ })
+
+ logger.Infoln("deleting GitHub hook...")
+ if _, err := router.githubClient.Repositories.DeleteHook(ctx, githubEventSource.Owner, githubEventSource.Repository, *router.hook.ID); err != nil {
+ return errors.Errorf("failed to delete hook. err: %+v", err)
+ }
+ logger.Infoln("GitHub hook deleted")
+ }
+
+ return nil
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ defer server.Recover(eventSource.Name)
+
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("started processing the event source...")
+
+ var githubEventSource *v1alpha1.GithubEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &githubEventSource); err != nil {
+		listener.Logger.WithError(err).WithField(common.LabelEventSource, eventSource.Name).Errorln("failed to parse the event source")
+ return err
+ }
+
+ route := webhook.NewRoute(githubEventSource.Webhook, listener.Logger, eventSource)
+
+ return webhook.ManageRoute(&Router{
+ route: route,
+ k8sClient: listener.K8sClient,
+ githubEventSource: githubEventSource,
+ }, controller, eventStream)
+}
+
+// parseValidateRequest parses a http request and checks if it is valid GitHub notification
+func parseValidateRequest(r *http.Request, secret []byte) ([]byte, error) {
+ body, err := gh.ValidatePayload(r, secret)
+ if err != nil {
+ return nil, err
+ }
+
+ payload := make(map[string]interface{})
+ if err := json.Unmarshal(body, &payload); err != nil {
+ return nil, err
+ }
+ for _, h := range []string{
+ githubEventHeader,
+ githubDeliveryHeader,
+ } {
+ payload[h] = r.Header.Get(h)
+ }
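+	// The result is the original event body with the X-GitHub-Event and X-GitHub-Delivery
+	// header values folded in as top-level keys, e.g. (illustrative push payload):
+	//   {"ref": "refs/heads/master", ..., "X-GitHub-Event": "push", "X-GitHub-Delivery": "<uuid>"}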
+ return json.Marshal(payload)
+}
diff --git a/gateways/community/github/start_test.go b/gateways/server/github/start_test.go
similarity index 60%
rename from gateways/community/github/start_test.go
rename to gateways/server/github/start_test.go
index 9206c8d5bf..f8f06577c3 100644
--- a/gateways/community/github/start_test.go
+++ b/gateways/server/github/start_test.go
@@ -19,11 +19,12 @@ package github
import (
"bytes"
"encoding/json"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
"io/ioutil"
"net/http"
"testing"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
"github.com/ghodss/yaml"
"github.com/google/go-github/github"
"github.com/smartystreets/goconvey/convey"
@@ -33,24 +34,24 @@ import (
)
var (
- rc = &RouteConfig{
- route: gwcommon.GetFakeRoute(),
- clientset: fake.NewSimpleClientset(),
- namespace: "fake",
+ router = &Router{
+ route: webhook.GetFakeRoute(),
+ k8sClient: fake.NewSimpleClientset(),
+ githubEventSource: &v1alpha1.GithubEventSource{
+ Namespace: "fake",
+ },
}
- secretName = "githab-access"
+ secretName = "github-access"
accessKey = "YWNjZXNz"
LabelAccessKey = "accesskey"
)
func TestGetCredentials(t *testing.T) {
convey.Convey("Given a kubernetes secret, get credentials", t, func() {
-
- secret, err := rc.clientset.CoreV1().Secrets(rc.namespace).Create(&corev1.Secret{
+ secret, err := router.k8sClient.CoreV1().Secrets(router.githubEventSource.Namespace).Create(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
- Name: secretName,
- Namespace: rc.namespace,
+ Name: secretName,
},
Data: map[string][]byte{
LabelAccessKey: []byte(accessKey),
@@ -59,9 +60,26 @@ func TestGetCredentials(t *testing.T) {
convey.So(err, convey.ShouldBeNil)
convey.So(secret, convey.ShouldNotBeNil)
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- creds, err := rc.getCredentials(ps.(*githubEventSource).APIToken)
+ githubEventSource := &v1alpha1.GithubEventSource{
+ Webhook: &webhook.Context{
+ Endpoint: "/push",
+ URL: "http://webhook-gateway-svc",
+ Port: "12000",
+ },
+ Owner: "fake",
+ Repository: "fake",
+ Events: []string{
+ "PushEvent",
+ },
+ APIToken: &corev1.SecretKeySelector{
+ Key: LabelAccessKey,
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "github-access",
+ },
+ },
+ }
+
+ creds, err := router.getCredentials(githubEventSource.APIToken, githubEventSource.Namespace)
convey.So(err, convey.ShouldBeNil)
convey.So(creds, convey.ShouldNotBeNil)
convey.So(creds.secret, convey.ShouldEqual, "YWNjZXNz")
@@ -70,35 +88,51 @@ func TestGetCredentials(t *testing.T) {
func TestRouteActiveHandler(t *testing.T) {
convey.Convey("Given a route configuration", t, func() {
- r := rc.route
- helper.ActiveEndpoints[r.Webhook.Endpoint] = &gwcommon.Endpoint{
- DataCh: make(chan []byte),
- }
+ route := router.route
+ route.DataCh = make(chan []byte)
convey.Convey("Inactive route should return error", func() {
- writer := &gwcommon.FakeHttpWriter{}
- ps, err := parseEventSource(es)
+ writer := &webhook.FakeHttpWriter{}
+ githubEventSource := &v1alpha1.GithubEventSource{
+ Webhook: &webhook.Context{
+ Endpoint: "/push",
+ URL: "http://webhook-gateway-svc",
+ Port: "12000",
+ },
+ Owner: "fake",
+ Repository: "fake",
+ Events: []string{
+ "PushEvent",
+ },
+ APIToken: &corev1.SecretKeySelector{
+ Key: "accessKey",
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "github-access",
+ },
+ },
+ }
+
+ body, err := yaml.Marshal(githubEventSource)
convey.So(err, convey.ShouldBeNil)
- pbytes, err := yaml.Marshal(ps.(*githubEventSource))
- convey.So(err, convey.ShouldBeNil)
- rc.RouteHandler(writer, &http.Request{
- Body: ioutil.NopCloser(bytes.NewReader(pbytes)),
+
+ router.HandleRoute(writer, &http.Request{
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
})
convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest)
convey.Convey("Active route should return success", func() {
- helper.ActiveEndpoints[r.Webhook.Endpoint].Active = true
- rc.hook = &github.Hook{
+ route.Active = true
+ router.hook = &github.Hook{
Config: make(map[string]interface{}),
}
- rc.RouteHandler(writer, &http.Request{
- Body: ioutil.NopCloser(bytes.NewReader(pbytes)),
+ router.HandleRoute(writer, &http.Request{
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
})
convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest)
- rc.ges = ps.(*githubEventSource)
- err = rc.PostStart()
+ router.githubEventSource = githubEventSource
+ err = router.PostActivate()
convey.So(err, convey.ShouldNotBeNil)
})
})
diff --git a/gateways/community/github/tokenauth.go b/gateways/server/github/tokenauth.go
similarity index 100%
rename from gateways/community/github/tokenauth.go
rename to gateways/server/github/tokenauth.go
diff --git a/gateways/server/github/types.go b/gateways/server/github/types.go
new file mode 100644
index 0000000000..8f593090a8
--- /dev/null
+++ b/gateways/server/github/types.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2018 KompiTech GmbH
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package github
+
+import (
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/google/go-github/github"
+ "github.com/sirupsen/logrus"
+ "k8s.io/client-go/kubernetes"
+)
+
+// EventListener implements Eventing for GitHub event source
+type EventListener struct {
+ // Logger to log stuff
+ Logger *logrus.Logger
+ // K8sClient is the Kubernetes client
+ K8sClient kubernetes.Interface
+ // Namespace where gateway is deployed
+ Namespace string
+}
+
+// Router contains information about the route
+type Router struct {
+ // route contains configuration for an API endpoint
+ route *webhook.Route
+ // githubEventSource is the event source that holds information to consume events from GitHub
+ githubEventSource *v1alpha1.GithubEventSource
+ // githubClient is the client to connect to GitHub
+ githubClient *github.Client
+ // hook represents a GitHub (web and service) hook for a repository.
+ hook *github.Hook
+ // K8sClient is the Kubernetes client
+ k8sClient kubernetes.Interface
+}
+
+// cred stores the api access token or webhook secret
+type cred struct {
+ secret string
+}
diff --git a/gateways/server/github/validate.go b/gateways/server/github/validate.go
new file mode 100644
index 0000000000..1cf261bca4
--- /dev/null
+++ b/gateways/server/github/validate.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2018 KompiTech GmbH
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package github
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates a github event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.GitHubEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.GitHubEvent)),
+ }, nil
+ }
+
+ var githubEventSource *v1alpha1.GithubEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &githubEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, err
+ }
+
+ if err := validate(githubEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ Reason: err.Error(),
+ IsValid: false,
+ }, err
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(githubEventSource *v1alpha1.GithubEventSource) error {
+ if githubEventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if githubEventSource.Repository == "" {
+ return fmt.Errorf("repository cannot be empty")
+ }
+ if githubEventSource.Owner == "" {
+ return fmt.Errorf("owner cannot be empty")
+ }
+ if githubEventSource.APIToken == nil {
+ return fmt.Errorf("api token can't be empty")
+ }
+ if githubEventSource.Events == nil || len(githubEventSource.Events) < 1 {
+ return fmt.Errorf("events must be defined")
+ }
+ if githubEventSource.ContentType != "" {
+ if !(githubEventSource.ContentType == "json" || githubEventSource.ContentType == "form") {
+ return fmt.Errorf("content type must be \"json\" or \"form\"")
+ }
+ }
+ return webhook.ValidateWebhookContext(githubEventSource.Webhook)
+}
diff --git a/gateways/server/github/validate_test.go b/gateways/server/github/validate_test.go
new file mode 100644
index 0000000000..be76f0be8e
--- /dev/null
+++ b/gateways/server/github/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package github
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateGithubEventSource(t *testing.T) {
+ listener := &EventListener{}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "github",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("github"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "github.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.Github {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "github",
+ Value: content,
+ Type: "github",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/community/gitlab/Dockerfile b/gateways/server/gitlab/Dockerfile
similarity index 100%
rename from gateways/community/gitlab/Dockerfile
rename to gateways/server/gitlab/Dockerfile
diff --git a/gateways/community/aws-sqs/cmd/main.go b/gateways/server/gitlab/cmd/main.go
similarity index 70%
rename from gateways/community/aws-sqs/cmd/main.go
rename to gateways/server/gitlab/cmd/main.go
index 5fbe7e123e..44ff8a90de 100644
--- a/gateways/community/aws-sqs/cmd/main.go
+++ b/gateways/server/gitlab/cmd/main.go
@@ -20,8 +20,8 @@ import (
"os"
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/community/aws-sqs"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/gitlab"
"k8s.io/client-go/kubernetes"
)
@@ -32,13 +32,8 @@ func main() {
panic(err)
}
clientset := kubernetes.NewForConfigOrDie(restConfig)
- namespace, ok := os.LookupEnv(common.EnvVarGatewayNamespace)
- if !ok {
- panic("namespace is not provided")
- }
- gateways.StartGateway(&aws_sqs.SQSEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- Clientset: clientset,
- Namespace: namespace,
+ server.StartGateway(&gitlab.EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ K8sClient: clientset,
})
}
diff --git a/gateways/server/gitlab/start.go b/gateways/server/gitlab/start.go
new file mode 100644
index 0000000000..2251d0c9a6
--- /dev/null
+++ b/gateways/server/gitlab/start.go
@@ -0,0 +1,202 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gitlab
+
+import (
+ "io/ioutil"
+ "net/http"
+ "reflect"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/argoproj/argo-events/store"
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+ "github.com/xanzy/go-gitlab"
+ corev1 "k8s.io/api/core/v1"
+)
+
+// controller controls the webhook operations
+var (
+ controller = webhook.NewController()
+)
+
+// set up the activation and inactivation channels to control the state of routes.
+func init() {
+ go webhook.ProcessRouteStatus(controller)
+}
+
+// getCredentials retrieves credentials to connect to GitLab
+func (router *Router) getCredentials(keySelector *corev1.SecretKeySelector, namespace string) (*cred, error) {
+ token, err := store.GetSecrets(router.k8sClient, namespace, keySelector.Name, keySelector.Key)
+ if err != nil {
+ return nil, err
+ }
+ return &cred{
+ token: token,
+ }, nil
+}
+
+// Implement Router
+// 1. GetRoute
+// 2. HandleRoute
+// 3. PostActivate
+// 4. PostInactivate
+
+// GetRoute returns the route
+func (router *Router) GetRoute() *webhook.Route {
+ return router.route
+}
+
+// HandleRoute handles incoming requests on the route
+func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) {
+ route := router.route
+
+ logger := route.Logger.WithFields(
+ map[string]interface{}{
+ common.LabelEventSource: route.EventSource.Name,
+ common.LabelEndpoint: route.Context.Endpoint,
+ common.LabelPort: route.Context.Port,
+ })
+
+ logger.Info("received a request, processing it...")
+
+	if !route.Active {
+ logger.Info("endpoint is not active, won't process the request")
+ common.SendErrorResponse(writer, "inactive endpoint")
+ return
+ }
+
+ body, err := ioutil.ReadAll(request.Body)
+ if err != nil {
+ logger.WithError(err).Error("failed to parse request body")
+ common.SendErrorResponse(writer, err.Error())
+ return
+ }
+
+ logger.Infoln("dispatching event on route's data channel")
+ route.DataCh <- body
+
+ logger.Info("request successfully processed")
+ common.SendSuccessResponse(writer, "success")
+}
+
+// PostActivate performs operations once the route is activated and ready to consume requests
+func (router *Router) PostActivate() error {
+ route := router.GetRoute()
+ gitlabEventSource := router.gitlabEventSource
+
+ // In order to set up a hook for the GitLab project,
+ // 1. Get the API access token for client
+ // 2. Set up GitLab client
+ // 3. Configure Hook with given event type
+ // 4. Create project hook
+
+ logger := route.Logger.WithFields(map[string]interface{}{
+ common.LabelEventSource: route.EventSource.Name,
+ "event-type": gitlabEventSource.Event,
+ "project-id": gitlabEventSource.ProjectId,
+ })
+
+ logger.Infoln("retrieving the access token credentials...")
+ c, err := router.getCredentials(gitlabEventSource.AccessToken, gitlabEventSource.Namespace)
+ if err != nil {
+ return errors.Errorf("failed to get gitlab credentials. err: %+v", err)
+ }
+
+ logger.Infoln("setting up the client to connect to GitLab...")
+ router.gitlabClient = gitlab.NewClient(nil, c.token)
+ if err = router.gitlabClient.SetBaseURL(gitlabEventSource.GitlabBaseURL); err != nil {
+ return errors.Errorf("failed to set gitlab base url, err: %+v", err)
+ }
+
+ formattedUrl := common.FormattedURL(gitlabEventSource.Webhook.URL, gitlabEventSource.Webhook.Endpoint)
+
+ opt := &gitlab.AddProjectHookOptions{
+ URL: &formattedUrl,
+ Token: &c.token,
+ EnableSSLVerification: &router.gitlabEventSource.EnableSSLVerification,
+ }
+
+ logger.Infoln("configuring the type of the GitLab event the hook must register against...")
+ elem := reflect.ValueOf(opt).Elem().FieldByName(string(router.gitlabEventSource.Event))
+ if ok := elem.IsValid(); !ok {
+ return errors.Errorf("unknown event %s", router.gitlabEventSource.Event)
+ }
+
+ iev := reflect.New(elem.Type().Elem())
+ reflect.Indirect(iev).SetBool(true)
+ elem.Set(iev)
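+	// The reflection above just flips the matching boolean field on AddProjectHookOptions:
+	// illustratively, an event source with Event "PushEvents" amounts to setting
+	// opt.PushEvents to a pointer to true by hand (the field name must match exactly).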
+
+ logger.Infoln("creating project hook...")
+ hook, _, err := router.gitlabClient.Projects.AddProjectHook(router.gitlabEventSource.ProjectId, opt)
+ if err != nil {
+ return errors.Errorf("failed to add project hook. err: %+v", err)
+ }
+
+ router.hook = hook
+ logger.WithField("hook-id", hook.ID).Info("hook created for the project")
+ return nil
+}
+
+// PostInactivate performs operations after the route is inactivated
+func (router *Router) PostInactivate() error {
+ gitlabEventSource := router.gitlabEventSource
+ route := router.route
+
+ if gitlabEventSource.DeleteHookOnFinish {
+ logger := route.Logger.WithFields(map[string]interface{}{
+ common.LabelEventSource: route.EventSource.Name,
+ "project-id": gitlabEventSource.ProjectId,
+ "hook-id": router.hook.ID,
+ })
+
+ logger.Infoln("deleting project hook...")
+ if _, err := router.gitlabClient.Projects.DeleteProjectHook(router.gitlabEventSource.ProjectId, router.hook.ID); err != nil {
+ return errors.Errorf("failed to delete hook. err: %+v", err)
+ }
+
+ logger.Infoln("gitlab hook deleted")
+ }
+ return nil
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ defer server.Recover(eventSource.Name)
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ logger.Info("started processing the event source...")
+
+ var gitlabEventSource *v1alpha1.GitlabEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &gitlabEventSource); err != nil {
+ logger.WithError(err).Error("failed to parse the event source")
+ return err
+ }
+
+ route := webhook.NewRoute(gitlabEventSource.Webhook, listener.Logger, eventSource)
+
+ return webhook.ManageRoute(&Router{
+ route: route,
+ k8sClient: listener.K8sClient,
+ gitlabEventSource: gitlabEventSource,
+ }, controller, eventStream)
+}
diff --git a/gateways/server/gitlab/types.go b/gateways/server/gitlab/types.go
new file mode 100644
index 0000000000..392be63e1d
--- /dev/null
+++ b/gateways/server/gitlab/types.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gitlab
+
+import (
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/sirupsen/logrus"
+ "github.com/xanzy/go-gitlab"
+ "k8s.io/client-go/kubernetes"
+)
+
+// EventListener implements Eventing for the GitLab event source
+type EventListener struct {
+ Logger *logrus.Logger
+ // K8sClient is kubernetes client
+ K8sClient kubernetes.Interface
+}
+
+// Router contains the configuration information for a route
+type Router struct {
+	// route contains information about an API endpoint
+ route *webhook.Route
+ // K8sClient is the Kubernetes client
+ k8sClient kubernetes.Interface
+ // gitlabClient is the client to connect to GitLab
+ gitlabClient *gitlab.Client
+ // hook is gitlab project hook
+ // GitLab API docs:
+ // https://docs.gitlab.com/ce/api/projects.html#list-project-hooks
+ hook *gitlab.ProjectHook
+ // gitlabEventSource is the event source that contains configuration necessary to consume events from GitLab
+ gitlabEventSource *v1alpha1.GitlabEventSource
+}
+
+// cred stores the api access token
+type cred struct {
+ // token is gitlab api access token
+ token string
+}
diff --git a/gateways/server/gitlab/validate.go b/gateways/server/gitlab/validate.go
new file mode 100644
index 0000000000..54f5b11467
--- /dev/null
+++ b/gateways/server/gitlab/validate.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2018 BlackRock, Inc.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gitlab
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates gitlab event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.GitLabEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.GitLabEvent)),
+ }, nil
+ }
+
+ var gitlabEventSource *v1alpha1.GitlabEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &gitlabEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(gitlabEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to validate gitlab event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.GitlabEventSource) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if eventSource.ProjectId == "" {
+ return fmt.Errorf("project id can't be empty")
+ }
+ if eventSource.Event == "" {
+ return fmt.Errorf("event type can't be empty")
+ }
+ if eventSource.GitlabBaseURL == "" {
+ return fmt.Errorf("gitlab base url can't be empty")
+ }
+ if eventSource.AccessToken == nil {
+ return fmt.Errorf("access token can't be nil")
+ }
+ return webhook.ValidateWebhookContext(eventSource.Webhook)
+}
diff --git a/gateways/server/gitlab/validate_test.go b/gateways/server/gitlab/validate_test.go
new file mode 100644
index 0000000000..cfa36491cc
--- /dev/null
+++ b/gateways/server/gitlab/validate_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gitlab
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"testing"
+
+	"github.com/argoproj/argo-events/common"
+	"github.com/argoproj/argo-events/gateways"
+	"github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+	"github.com/ghodss/yaml"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestValidateGitlabEventSource(t *testing.T) {
+ listener := &EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ }
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "gitlab",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("gitlab"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "gitlab.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.Gitlab {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "gitlab",
+ Value: content,
+ Type: "gitlab",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/community/hdfs/Dockerfile b/gateways/server/hdfs/Dockerfile
similarity index 100%
rename from gateways/community/hdfs/Dockerfile
rename to gateways/server/hdfs/Dockerfile
diff --git a/gateways/community/hdfs/client.go b/gateways/server/hdfs/client.go
similarity index 79%
rename from gateways/community/hdfs/client.go
rename to gateways/server/hdfs/client.go
index a8a5724de3..a7c6540357 100644
--- a/gateways/community/hdfs/client.go
+++ b/gateways/server/hdfs/client.go
@@ -3,6 +3,7 @@ package hdfs
import (
"fmt"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
"github.com/colinmarc/hdfs"
krb "gopkg.in/jcmturner/gokrb5.v5/client"
"gopkg.in/jcmturner/gokrb5.v5/config"
@@ -67,19 +68,19 @@ func getSecretKey(clientset kubernetes.Interface, namespace string, selector *co
}
// createHDFSConfig constructs HDFSConfig
-func createHDFSConfig(clientset kubernetes.Interface, namespace string, config *GatewayClientConfig) (*HDFSConfig, error) {
+func createHDFSConfig(clientset kubernetes.Interface, namespace string, hdfsEventSource *v1alpha1.HDFSEventSource) (*HDFSConfig, error) {
var krbConfig string
var krbOptions *KrbOptions
var err error
- if config.KrbConfigConfigMap != nil && config.KrbConfigConfigMap.Name != "" {
- krbConfig, err = getConfigMapKey(clientset, namespace, config.KrbConfigConfigMap)
+ if hdfsEventSource.KrbConfigConfigMap != nil && hdfsEventSource.KrbConfigConfigMap.Name != "" {
+ krbConfig, err = getConfigMapKey(clientset, namespace, hdfsEventSource.KrbConfigConfigMap)
if err != nil {
return nil, err
}
}
- if config.KrbCCacheSecret != nil && config.KrbCCacheSecret.Name != "" {
- bytes, err := getSecretKey(clientset, namespace, config.KrbCCacheSecret)
+ if hdfsEventSource.KrbCCacheSecret != nil && hdfsEventSource.KrbCCacheSecret.Name != "" {
+ bytes, err := getSecretKey(clientset, namespace, hdfsEventSource.KrbCCacheSecret)
if err != nil {
return nil, err
}
@@ -92,11 +93,11 @@ func createHDFSConfig(clientset kubernetes.Interface, namespace string, config *
CCache: ccache,
},
Config: krbConfig,
- ServicePrincipalName: config.KrbServicePrincipalName,
+ ServicePrincipalName: hdfsEventSource.KrbServicePrincipalName,
}
}
- if config.KrbKeytabSecret != nil && config.KrbKeytabSecret.Name != "" {
- bytes, err := getSecretKey(clientset, namespace, config.KrbKeytabSecret)
+ if hdfsEventSource.KrbKeytabSecret != nil && hdfsEventSource.KrbKeytabSecret.Name != "" {
+ bytes, err := getSecretKey(clientset, namespace, hdfsEventSource.KrbKeytabSecret)
if err != nil {
return nil, err
}
@@ -107,17 +108,17 @@ func createHDFSConfig(clientset kubernetes.Interface, namespace string, config *
krbOptions = &KrbOptions{
KeytabOptions: &KeytabOptions{
Keytab: ktb,
- Username: config.KrbUsername,
- Realm: config.KrbRealm,
+ Username: hdfsEventSource.KrbUsername,
+ Realm: hdfsEventSource.KrbRealm,
},
Config: krbConfig,
- ServicePrincipalName: config.KrbServicePrincipalName,
+ ServicePrincipalName: hdfsEventSource.KrbServicePrincipalName,
}
}
hdfsConfig := HDFSConfig{
- Addresses: config.Addresses,
- HDFSUser: config.HDFSUser,
+ Addresses: hdfsEventSource.Addresses,
+ HDFSUser: hdfsEventSource.HDFSUser,
KrbOptions: krbOptions,
}
return &hdfsConfig, nil
diff --git a/gateways/server/hdfs/cmd/main.go b/gateways/server/hdfs/cmd/main.go
new file mode 100644
index 0000000000..c789ec5d68
--- /dev/null
+++ b/gateways/server/hdfs/cmd/main.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+ "os"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/hdfs"
+ "k8s.io/client-go/kubernetes"
+)
+
+func main() {
+ kubeConfig, _ := os.LookupEnv(common.EnvVarKubeConfig)
+ restConfig, err := common.GetClientConfig(kubeConfig)
+ if err != nil {
+ panic(err)
+ }
+ clientset := kubernetes.NewForConfigOrDie(restConfig)
+ server.StartGateway(&hdfs.EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ K8sClient: clientset,
+ })
+}
diff --git a/gateways/server/hdfs/start.go b/gateways/server/hdfs/start.go
new file mode 100644
index 0000000000..a7f4fe9996
--- /dev/null
+++ b/gateways/server/hdfs/start.go
@@ -0,0 +1,182 @@
+package hdfs
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/common/fsevent"
+ "github.com/argoproj/argo-events/gateways/server/common/naivewatcher"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/colinmarc/hdfs"
+ "github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
+ "k8s.io/client-go/kubernetes"
+)
+
+// EventListener implements Eventing for HDFS events
+type EventListener struct {
+ // Logger logs stuff
+ Logger *logrus.Logger
+ // K8sClient is the kubernetes client
+ K8sClient kubernetes.Interface
+}
+
+// WatchableHDFS wraps hdfs.Client for naivewatcher
+type WatchableHDFS struct {
+ hdfscli *hdfs.Client
+}
+
+// Walk walks a directory
+func (w *WatchableHDFS) Walk(root string, walkFn filepath.WalkFunc) error {
+ return w.hdfscli.Walk(root, walkFn)
+}
+
+// GetFileID returns the file ID
+func (w *WatchableHDFS) GetFileID(fi os.FileInfo) interface{} {
+ return fi.Name()
+ // FIXME: Use HDFS File ID once it's exposed
+ // https://github.com/colinmarc/hdfs/pull/171
+ // return fi.Sys().(*hadoop_hdfs.HdfsFileStatusProto).GetFileID()
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ defer server.Recover(eventSource.Name)
+
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Info("start processing the event source...")
+
+ dataCh := make(chan []byte)
+ errorCh := make(chan error)
+ doneCh := make(chan struct{}, 1)
+
+ go listener.listenEvents(eventSource, dataCh, errorCh, doneCh)
+
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
+}
+
+// listenEvents listens to HDFS events
+func (listener *EventListener) listenEvents(eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+ defer server.Recover(eventSource.Name)
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ logger.Infoln("parsing the event source...")
+
+ var hdfsEventSource *v1alpha1.HDFSEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &hdfsEventSource); err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger.Infoln("setting up HDFS configuration...")
+ hdfsConfig, err := createHDFSConfig(listener.K8sClient, hdfsEventSource.Namespace, hdfsEventSource)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger.Infoln("setting up HDFS client...")
+ hdfscli, err := createHDFSClient(hdfsConfig.Addresses, hdfsConfig.HDFSUser, hdfsConfig.KrbOptions)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ defer hdfscli.Close()
+
+ logger.Infoln("setting up a new watcher...")
+ watcher, err := naivewatcher.NewWatcher(&WatchableHDFS{hdfscli: hdfscli})
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ defer watcher.Close()
+
+ intervalDuration := 1 * time.Minute
+ if hdfsEventSource.CheckInterval != "" {
+ d, err := time.ParseDuration(hdfsEventSource.CheckInterval)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ intervalDuration = d
+ }
+
+ logger.Infoln("started HDFS watcher")
+ err = watcher.Start(intervalDuration)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ // directory to watch must be available in HDFS. You can't watch a directory that is not present.
+ logger.Infoln("adding configured directory to watcher...")
+ err = watcher.Add(hdfsEventSource.Directory)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ op := fsevent.NewOp(hdfsEventSource.Type)
+ var pathRegexp *regexp.Regexp
+ if hdfsEventSource.PathRegexp != "" {
+ pathRegexp, err = regexp.Compile(hdfsEventSource.PathRegexp)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ }
+
+ logger.Infoln("listening to HDFS notifications...")
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok {
+ logger.Info("HDFS watcher has stopped")
+ // watcher stopped watching file events
+ errorCh <- fmt.Errorf("HDFS watcher stopped")
+ return
+ }
+ matched := false
+ relPath := strings.TrimPrefix(event.Name, hdfsEventSource.Directory)
+
+ if hdfsEventSource.Path != "" && hdfsEventSource.Path == relPath {
+ matched = true
+ } else if pathRegexp != nil && pathRegexp.MatchString(relPath) {
+ matched = true
+ }
+
+ if matched && (op&event.Op != 0) {
+ logger := logger.WithFields(
+ map[string]interface{}{
+ "event-type": event.Op.String(),
+ "descriptor-name": event.Name,
+ },
+ )
+ logger.Infoln("received an event")
+
+ logger.Infoln("parsing the event...")
+ payload, err := json.Marshal(event)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger.Infoln("dispatching event on data channel")
+ dataCh <- payload
+ }
+ case err := <-watcher.Errors:
+ errorCh <- err
+ return
+ case <-doneCh:
+ return
+ }
+ }
+}
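
The filtering in the loop above combines three knobs from the event source: an exact `Path`, a `PathRegexp`, and the operation bitmask built by `fsevent.NewOp`. A self-contained sketch of that rule (hypothetical, not part of the patch; a plain `uint32` stands in for `fsevent.Op`):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matches mirrors the check in listenEvents: the event path, relative to the
// watched directory, must match either the exact path or the regexp, AND the
// event's operation must be included in the configured op bitmask.
func matches(eventName, directory, exactPath string, pathRegexp *regexp.Regexp, opMask, eventOp uint32) bool {
	relPath := strings.TrimPrefix(eventName, directory)

	matched := false
	if exactPath != "" && exactPath == relPath {
		matched = true
	} else if pathRegexp != nil && pathRegexp.MatchString(relPath) {
		matched = true
	}
	// Same test as `matched && (op&event.Op != 0)` in start.go.
	return matched && (opMask&eventOp != 0)
}

func main() {
	csvOnly := regexp.MustCompile(`\.csv$`)
	const create, remove uint32 = 1, 2

	fmt.Println(matches("/data/in/report.csv", "/data/in/", "", csvOnly, create, create)) // true
	fmt.Println(matches("/data/in/report.txt", "/data/in/", "", csvOnly, create, create)) // false: regexp does not match
	fmt.Println(matches("/data/in/report.csv", "/data/in/", "", csvOnly, create, remove)) // false: op not in mask
}
```
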
diff --git a/gateways/server/hdfs/validate.go b/gateways/server/hdfs/validate.go
new file mode 100644
index 0000000000..4c546fd13e
--- /dev/null
+++ b/gateways/server/hdfs/validate.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hdfs
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server/common/fsevent"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates hdfs event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.HDFSEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.HDFSEvent)),
+ }, nil
+ }
+
+ var hdfsEventSource *v1alpha1.HDFSEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &hdfsEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(hdfsEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to validate HDFS event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.HDFSEventSource) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if eventSource.Type == "" {
+ return errors.New("type is required")
+ }
+ op := fsevent.NewOp(eventSource.Type)
+ if op == 0 {
+ return errors.New("type is invalid")
+ }
+ if eventSource.CheckInterval != "" {
+ _, err := time.ParseDuration(eventSource.CheckInterval)
+ if err != nil {
+ return errors.New("failed to parse interval")
+ }
+ }
+ err := eventSource.WatchPathConfig.Validate()
+ if err != nil {
+ return err
+ }
+ if len(eventSource.Addresses) == 0 {
+ return errors.New("addresses is required")
+ }
+
+ hasKrbCCache := eventSource.KrbCCacheSecret != nil
+ hasKrbKeytab := eventSource.KrbKeytabSecret != nil
+
+ if eventSource.HDFSUser == "" && !hasKrbCCache && !hasKrbKeytab {
+ return errors.New("either hdfsUser, krbCCacheSecret or krbKeytabSecret is required")
+ }
+ if hasKrbKeytab && (eventSource.KrbServicePrincipalName == "" || eventSource.KrbConfigConfigMap == nil || eventSource.KrbUsername == "" || eventSource.KrbRealm == "") {
+ return errors.New("krbServicePrincipalName, krbConfigConfigMap, krbUsername and krbRealm are required with krbKeytabSecret")
+ }
+ if hasKrbCCache && (eventSource.KrbServicePrincipalName == "" || eventSource.KrbConfigConfigMap == nil) {
+ return errors.New("krbServicePrincipalName and krbConfigConfigMap are required with krbCCacheSecret")
+ }
+ return nil
+}
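
For reference, a hypothetical snippet that runs a sample spec through the same `yaml.Unmarshal` + `validate()` path used by `ValidateEventSource` above. The YAML keys (`addresses`, `hdfsUser`, `directory`, `type`, `checkInterval`) are assumptions about the `HDFSEventSource` field tags, chosen to mirror the fields the code reads; adjust them to the real CRD example.

```go
package hdfs

import (
	"fmt"

	"github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
	"github.com/ghodss/yaml"
)

// validateSampleHDFSEventSource is hypothetical; it only illustrates the
// checks enforced by validate(): a known fsevent type, a parseable
// checkInterval, a valid watch path config, at least one address, and either
// hdfsUser or one of the Kerberos secrets.
func validateSampleHDFSEventSource() error {
	sample := []byte(`
addresses:
  - "my-hdfs-namenode-0.my-hdfs-namenode.default.svc:8020"
hdfsUser: root
directory: "/tmp/"
type: "CREATE"
checkInterval: 5s
`)
	var eventSource *v1alpha1.HDFSEventSource
	if err := yaml.Unmarshal(sample, &eventSource); err != nil {
		return fmt.Errorf("failed to parse the sample event source: %v", err)
	}
	return validate(eventSource)
}
```
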
diff --git a/gateways/server/hdfs/validate_test.go b/gateways/server/hdfs/validate_test.go
new file mode 100644
index 0000000000..172c2614be
--- /dev/null
+++ b/gateways/server/hdfs/validate_test.go
@@ -0,0 +1,50 @@
+package hdfs
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateEventSource(t *testing.T) {
+ listener := &EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ }
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "hdfs",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("hdfs"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "hdfs.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.HDFS {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "hdfs",
+ Value: content,
+ Type: "hdfs",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/core/stream/kafka/Dockerfile b/gateways/server/kafka/Dockerfile
similarity index 100%
rename from gateways/core/stream/kafka/Dockerfile
rename to gateways/server/kafka/Dockerfile
diff --git a/gateways/core/stream/amqp/cmd/main.go b/gateways/server/kafka/cmd/main.go
similarity index 76%
rename from gateways/core/stream/amqp/cmd/main.go
rename to gateways/server/kafka/cmd/main.go
index 6018d125c0..c145d9941a 100644
--- a/gateways/core/stream/amqp/cmd/main.go
+++ b/gateways/server/kafka/cmd/main.go
@@ -18,12 +18,12 @@ package main
import (
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/core/stream/amqp"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/kafka"
)
func main() {
- gateways.StartGateway(&amqp.AMQPEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
+ server.StartGateway(&kafka.EventListener{
+ Logger: common.NewArgoEventsLogger(),
})
}
diff --git a/gateways/server/kafka/start.go b/gateways/server/kafka/start.go
new file mode 100644
index 0000000000..b117f9cfc5
--- /dev/null
+++ b/gateways/server/kafka/start.go
@@ -0,0 +1,143 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kafka
+
+import (
+ "strconv"
+
+ "github.com/Shopify/sarama"
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "k8s.io/apimachinery/pkg/util/wait"
+)
+
+// EventListener implements Eventing for the kafka event source
+type EventListener struct {
+ // Logger logs stuff
+ Logger *logrus.Logger
+}
+
+func verifyPartitionAvailable(part int32, partitions []int32) bool {
+ for _, p := range partitions {
+ if part == p {
+ return true
+ }
+ }
+ return false
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("started processing the event source...")
+
+ dataCh := make(chan []byte)
+ errorCh := make(chan error)
+ doneCh := make(chan struct{}, 1)
+
+ go listener.listenEvents(eventSource, dataCh, errorCh, doneCh)
+
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
+}
+
+func (listener *EventListener) listenEvents(eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+ defer server.Recover(eventSource.Name)
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ logger.Infoln("parsing the event source...")
+ var kafkaEventSource *v1alpha1.KafkaEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &kafkaEventSource); err != nil {
+ errorCh <- err
+ return
+ }
+
+ var consumer sarama.Consumer
+
+ logger.Infoln("connecting to Kafka cluster...")
+ if err := server.Connect(&wait.Backoff{
+ Steps: kafkaEventSource.ConnectionBackoff.Steps,
+ Jitter: kafkaEventSource.ConnectionBackoff.Jitter,
+ Duration: kafkaEventSource.ConnectionBackoff.Duration,
+ Factor: kafkaEventSource.ConnectionBackoff.Factor,
+ }, func() error {
+ var err error
+ consumer, err = sarama.NewConsumer([]string{kafkaEventSource.URL}, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ logger.WithError(err).WithField(common.LabelURL, kafkaEventSource.URL).Error("failed to connect")
+ errorCh <- err
+ return
+ }
+
+ logger = logger.WithField("partition-id", kafkaEventSource.Partition)
+
+ logger.Infoln("parsing the partition value...")
+ pInt, err := strconv.ParseInt(kafkaEventSource.Partition, 10, 32)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ partition := int32(pInt)
+
+ logger.Infoln("getting available partitions...")
+ availablePartitions, err := consumer.Partitions(kafkaEventSource.Topic)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger.Infoln("verifying the partition exists within available partitions...")
+ if ok := verifyPartitionAvailable(partition, availablePartitions); !ok {
+ errorCh <- errors.Errorf("partition %d is not available", partition)
+ return
+ }
+
+ logger.Infoln("getting partition consumer...")
+ partitionConsumer, err := consumer.ConsumePartition(kafkaEventSource.Topic, partition, sarama.OffsetNewest)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger.Info("listening to messages on the partition...")
+ for {
+ select {
+ case msg := <-partitionConsumer.Messages():
+ logger.Infoln("dispatching event on the data channel...")
+ dataCh <- msg.Value
+
+ case err := <-partitionConsumer.Errors():
+ errorCh <- err
+ return
+
+ case <-doneCh:
+ err = partitionConsumer.Close()
+ if err != nil {
+ logger.WithError(err).Error("failed to close consumer")
+ }
+ return
+ }
+ }
+}
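
The connect-with-backoff pattern above (also used by the mqtt and nats listeners below) retries the broker connection under the `ConnectionBackoff` carried by the event source. A minimal stand-in for `server.Connect`, assuming it is a thin wrapper over `wait.ExponentialBackoff`; the real implementation may differ, and the broker address and backoff values are placeholders.

```go
package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
	"k8s.io/apimachinery/pkg/util/wait"
)

// connect is a stand-in for server.Connect as used above: it retries the
// supplied connection attempt under the given backoff and reports the last
// connection error if the retries are exhausted.
func connect(backoff *wait.Backoff, conn func() error) error {
	var lastErr error
	err := wait.ExponentialBackoff(*backoff, func() (bool, error) {
		if lastErr = conn(); lastErr != nil {
			// Not connected yet; let the next backoff step run.
			return false, nil
		}
		return true, nil
	})
	if err != nil && lastErr != nil {
		return lastErr
	}
	return err
}

func main() {
	var consumer sarama.Consumer

	backoff := &wait.Backoff{Duration: time.Second, Factor: 2.0, Jitter: 0.1, Steps: 5}
	if err := connect(backoff, func() error {
		var err error
		consumer, err = sarama.NewConsumer([]string{"kafka.argo-events:9092"}, nil)
		return err
	}); err != nil {
		fmt.Println("giving up:", err)
		return
	}
	defer consumer.Close()
	fmt.Println("connected")
}
```
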
diff --git a/gateways/server/kafka/validate.go b/gateways/server/kafka/validate.go
new file mode 100644
index 0000000000..1b1cd9b481
--- /dev/null
+++ b/gateways/server/kafka/validate.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kafka
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates the gateway event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.KafkaEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.KafkaEvent)),
+ }, nil
+ }
+
+ var kafkaEventSource *v1alpha1.KafkaEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &kafkaEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(kafkaEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to validate kafka event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.KafkaEventSource) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if eventSource.URL == "" {
+ return fmt.Errorf("url must be specified")
+ }
+ if eventSource.Topic == "" {
+ return fmt.Errorf("topic must be specified")
+ }
+ if eventSource.Partition == "" {
+ return fmt.Errorf("partition must be specified")
+ }
+ return nil
+}
diff --git a/gateways/server/kafka/validate_test.go b/gateways/server/kafka/validate_test.go
new file mode 100644
index 0000000000..ecc555ca6b
--- /dev/null
+++ b/gateways/server/kafka/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateKafkaEventSource(t *testing.T) {
+ listener := &EventListener{Logger: common.NewArgoEventsLogger()}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "kafka",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("kafka"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "kafka.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.Kafka {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "kafka",
+ Value: content,
+ Type: "kafka",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/server/minio/Dockerfile b/gateways/server/minio/Dockerfile
new file mode 100644
index 0000000000..893b248671
--- /dev/null
+++ b/gateways/server/minio/Dockerfile
@@ -0,0 +1,3 @@
+FROM centos:7
+COPY dist/minio-gateway /bin/
+ENTRYPOINT [ "/bin/minio-gateway" ]
\ No newline at end of file
diff --git a/gateways/community/slack/cmd/main.go b/gateways/server/minio/cmd/main.go
similarity index 74%
rename from gateways/community/slack/cmd/main.go
rename to gateways/server/minio/cmd/main.go
index 57c07e1961..2036149a18 100644
--- a/gateways/community/slack/cmd/main.go
+++ b/gateways/server/minio/cmd/main.go
@@ -20,8 +20,8 @@ import (
"os"
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/community/slack"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/minio"
"k8s.io/client-go/kubernetes"
)
@@ -32,13 +32,15 @@ func main() {
panic(err)
}
clientset := kubernetes.NewForConfigOrDie(restConfig)
- namespace, ok := os.LookupEnv(common.EnvVarGatewayNamespace)
+
+ namespace, ok := os.LookupEnv(common.EnvVarNamespace)
if !ok {
panic("namespace is not provided")
}
- gateways.StartGateway(&slack.SlackEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- Clientset: clientset,
- Namespace: namespace,
+
+ server.StartGateway(&minio.EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ K8sClient: clientset,
+ Namespace: namespace,
})
}
diff --git a/gateways/server/minio/start.go b/gateways/server/minio/start.go
new file mode 100644
index 0000000000..8a19617bca
--- /dev/null
+++ b/gateways/server/minio/start.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package minio
+
+import (
+ "encoding/json"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/store"
+ "github.com/ghodss/yaml"
+ "github.com/minio/minio-go"
+ "github.com/sirupsen/logrus"
+ "k8s.io/client-go/kubernetes"
+)
+
+// EventListener implements Eventing for minio event sources
+type EventListener struct {
+ // Logger
+ Logger *logrus.Logger
+ // K8sClient is kubernetes client
+ K8sClient kubernetes.Interface
+ // Namespace where gateway is deployed
+ Namespace string
+}
+
+// StartEventSource activates an event source and streams back events
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("activating the event source...")
+
+ dataCh := make(chan []byte)
+ errorCh := make(chan error)
+ doneCh := make(chan struct{}, 1)
+
+ go listener.listenEvents(eventSource, dataCh, errorCh, doneCh)
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
+}
+
+// listenEvents listens to minio bucket notifications
+func (listener *EventListener) listenEvents(eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+ defer server.Recover(eventSource.Name)
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ Logger.Infoln("parsing minio event source...")
+
+ var minioEventSource *apicommon.S3Artifact
+ err := yaml.Unmarshal(eventSource.Value, &minioEventSource)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ Logger.Info("started processing the event source...")
+
+ Logger.Info("retrieving access and secret key...")
+ accessKey, err := store.GetSecrets(listener.K8sClient, listener.Namespace, minioEventSource.AccessKey.Name, minioEventSource.AccessKey.Key)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+ secretKey, err := store.GetSecrets(listener.K8sClient, listener.Namespace, minioEventSource.SecretKey.Name, minioEventSource.SecretKey.Key)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ Logger.Infoln("setting up a minio client...")
+ minioClient, err := minio.New(minioEventSource.Endpoint, accessKey, secretKey, !minioEventSource.Insecure)
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ Logger.Info("started listening to bucket notifications...")
+ for notification := range minioClient.ListenBucketNotification(minioEventSource.Bucket.Name, minioEventSource.Filter.Prefix, minioEventSource.Filter.Suffix, minioEventSource.Events, doneCh) {
+ if notification.Err != nil {
+ errorCh <- notification.Err
+ return
+ }
+
+ Logger.Infoln("parsing notification from minio...")
+ payload, err := json.Marshal(notification.Records[0])
+ if err != nil {
+ errorCh <- err
+ return
+ }
+
+ Logger.Infoln("dispatching notification on data channel...")
+ dataCh <- payload
+ }
+}
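
A hypothetical standalone analogue of the listener above, handy for poking a bucket outside the gateway plumbing. Endpoint, credentials and bucket name are placeholders; the minio-go calls mirror the ones `listenEvents` makes (`minio.New`, `ListenBucketNotification`).

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio-go"
)

func main() {
	// Endpoint, credentials and bucket are placeholders.
	client, err := minio.New("minio-service.argo-events:9000", "ACCESSKEY", "SECRETKEY", false)
	if err != nil {
		panic(err)
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	events := []string{"s3:ObjectCreated:Put"}
	for notification := range client.ListenBucketNotification("input", "", "", events, doneCh) {
		if notification.Err != nil {
			fmt.Println("notification error:", notification.Err)
			return
		}
		if len(notification.Records) == 0 {
			continue
		}
		// The gateway forwards the first record as the event payload.
		payload, err := json.Marshal(notification.Records[0])
		if err != nil {
			fmt.Println("failed to marshal the record:", err)
			continue
		}
		fmt.Println(string(payload))
	}
}
```
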
diff --git a/gateways/core/artifact/start_test.go b/gateways/server/minio/start_test.go
similarity index 59%
rename from gateways/core/artifact/start_test.go
rename to gateways/server/minio/start_test.go
index 053449cc12..64474022e6 100644
--- a/gateways/core/artifact/start_test.go
+++ b/gateways/server/minio/start_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package artifact
+package minio
import (
"testing"
@@ -22,6 +22,7 @@ import (
"github.com/argoproj/argo-events/common"
"github.com/argoproj/argo-events/gateways"
apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/ghodss/yaml"
"github.com/smartystreets/goconvey/convey"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -30,15 +31,15 @@ import (
func TestListeEvents(t *testing.T) {
convey.Convey("Given an event source, listen to events", t, func() {
- ese := &S3EventSourceExecutor{
- Clientset: fake.NewSimpleClientset(),
- Log: common.NewArgoEventsLogger(),
+ listener := &EventListener{
+ K8sClient: fake.NewSimpleClientset(),
+ Logger: common.NewArgoEventsLogger(),
Namespace: "fake",
}
- secret, err := ese.Clientset.CoreV1().Secrets(ese.Namespace).Create(&corev1.Secret{
+ secret, err := listener.K8sClient.CoreV1().Secrets(listener.Namespace).Create(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "artifacts-minio",
- Namespace: ese.Namespace,
+ Namespace: listener.Namespace,
},
Data: map[string][]byte{
"accesskey": []byte("access"),
@@ -58,12 +59,42 @@ func TestListeEvents(t *testing.T) {
errCh2 <- err
}()
- ps, err := parseEventSource(es)
+ minioEventSource := &apicommon.S3Artifact{
+ Bucket: &apicommon.S3Bucket{
+ Name: "input",
+ },
+ Endpoint: "minio-service.argo-events:9000",
+ Events: []string{
+ "s3:ObjectCreated:Put",
+ },
+ Filter: &apicommon.S3Filter{
+ Prefix: "",
+ Suffix: "",
+ },
+ Insecure: true,
+ AccessKey: &corev1.SecretKeySelector{
+ Key: "accesskey",
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "artifacts-minio",
+ },
+ },
+ SecretKey: &corev1.SecretKeySelector{
+ Key: "secretkey",
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "artifacts-minio",
+ },
+ },
+ }
+
convey.So(err, convey.ShouldBeNil)
- ese.listenEvents(ps.(*apicommon.S3Artifact), &gateways.EventSource{
- Id: "1234",
- Data: es,
- Name: "fake",
+
+ body, err := yaml.Marshal(minioEventSource)
+ convey.So(err, convey.ShouldBeNil)
+
+ listener.listenEvents(&gateways.EventSource{
+ Id: "1234",
+ Value: body,
+ Name: "fake",
}, dataCh, errorCh, doneCh)
err = <-errCh2
diff --git a/gateways/server/minio/validate.go b/gateways/server/minio/validate.go
new file mode 100644
index 0000000000..377af789a9
--- /dev/null
+++ b/gateways/server/minio/validate.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package minio
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/ghodss/yaml"
+ "github.com/minio/minio-go"
+)
+
+// ValidateEventSource validates the minio event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.MinioEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.MinioEvent)),
+ }, nil
+ }
+
+ var minioEventSource *apicommon.S3Artifact
+ err := yaml.Unmarshal(eventSource.Value, &minioEventSource)
+ if err != nil {
+ listener.Logger.WithError(err).Error("failed to parse the minio event source")
+ return &gateways.ValidEventSource{
+ Reason: err.Error(),
+ IsValid: false,
+ }, nil
+ }
+
+ if err := validate(minioEventSource); err != nil {
+ return &gateways.ValidEventSource{
+ Reason: err.Error(),
+ IsValid: false,
+ }, nil
+ }
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *apicommon.S3Artifact) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if eventSource.AccessKey == nil {
+ return fmt.Errorf("access key can't be empty")
+ }
+ if eventSource.SecretKey == nil {
+ return fmt.Errorf("secret key can't be empty")
+ }
+ if eventSource.Endpoint == "" {
+ return fmt.Errorf("endpoint url can't be empty")
+ }
+ if eventSource.Bucket != nil && eventSource.Bucket.Name == "" {
+ return fmt.Errorf("bucket name can't be empty")
+ }
+ if eventSource.Events != nil {
+ for _, event := range eventSource.Events {
+ if minio.NotificationEventType(event) == "" {
+ return fmt.Errorf("unknown event %s", event)
+ }
+ }
+ }
+ return nil
+}
diff --git a/gateways/server/minio/validate_test.go b/gateways/server/minio/validate_test.go
new file mode 100644
index 0000000000..1ab9f89d47
--- /dev/null
+++ b/gateways/server/minio/validate_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package minio
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateS3EventSource(t *testing.T) {
+ listener := &EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ }
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "minio",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("minio"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "minio.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.Minio {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "minio",
+ Value: content,
+ Type: "minio",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/core/stream/mqtt/Dockerfile b/gateways/server/mqtt/Dockerfile
similarity index 100%
rename from gateways/core/stream/mqtt/Dockerfile
rename to gateways/server/mqtt/Dockerfile
diff --git a/gateways/core/stream/mqtt/cmd/main.go b/gateways/server/mqtt/cmd/main.go
similarity index 76%
rename from gateways/core/stream/mqtt/cmd/main.go
rename to gateways/server/mqtt/cmd/main.go
index 14a4c1dc3c..c383529f16 100644
--- a/gateways/core/stream/mqtt/cmd/main.go
+++ b/gateways/server/mqtt/cmd/main.go
@@ -18,12 +18,12 @@ package main
import (
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/core/stream/mqtt"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/mqtt"
)
func main() {
- gateways.StartGateway(&mqtt.MqttEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
+ server.StartGateway(&mqtt.EventListener{
+ Logger: common.NewArgoEventsLogger(),
})
}
diff --git a/gateways/server/mqtt/start.go b/gateways/server/mqtt/start.go
new file mode 100644
index 0000000000..af429fa2b4
--- /dev/null
+++ b/gateways/server/mqtt/start.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mqtt
+
+import (
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ mqttlib "github.com/eclipse/paho.mqtt.golang"
+ "github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
+ "k8s.io/apimachinery/pkg/util/wait"
+)
+
+// EventListener implements Eventing for mqtt event source
+type EventListener struct {
+ // Logger to log stuff
+ Logger *logrus.Logger
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("started processing the event source...")
+
+ dataCh := make(chan []byte)
+ errorCh := make(chan error)
+ doneCh := make(chan struct{}, 1)
+
+ go listener.listenEvents(eventSource, dataCh, errorCh, doneCh)
+
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
+}
+
+// listenEvents listens to events from an mqtt broker
+func (listener *EventListener) listenEvents(eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+ defer server.Recover(eventSource.Name)
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ logger.Infoln("parsing the event source...")
+ var mqttEventSource *v1alpha1.MQTTEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &mqttEventSource); err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger = logger.WithFields(
+ map[string]interface{}{
+ common.LabelURL: mqttEventSource.URL,
+ common.LabelClientID: mqttEventSource.ClientId,
+ },
+ )
+
+ logger.Infoln("setting up the message handler...")
+ handler := func(c mqttlib.Client, msg mqttlib.Message) {
+ logger.Infoln("dispatching event on data channel...")
+ dataCh <- msg.Payload()
+ }
+
+ logger.Infoln("setting up the mqtt broker client...")
+ opts := mqttlib.NewClientOptions().AddBroker(mqttEventSource.URL).SetClientID(mqttEventSource.ClientId)
+
+ var client mqttlib.Client
+
+ logger.Infoln("connecting to mqtt broker...")
+ if err := server.Connect(&wait.Backoff{
+ Factor: mqttEventSource.ConnectionBackoff.Factor,
+ Duration: mqttEventSource.ConnectionBackoff.Duration,
+ Jitter: mqttEventSource.ConnectionBackoff.Jitter,
+ Steps: mqttEventSource.ConnectionBackoff.Steps,
+ }, func() error {
+ client = mqttlib.NewClient(opts)
+ if token := client.Connect(); token.Wait() && token.Error() != nil {
+ return token.Error()
+ }
+ return nil
+ }); err != nil {
+ logger.Info("failed to connect")
+ errorCh <- err
+ return
+ }
+
+ logger.Info("subscribing to the topic...")
+ if token := client.Subscribe(mqttEventSource.Topic, 0, handler); token.Wait() && token.Error() != nil {
+ logger.WithError(token.Error()).Error("failed to subscribe")
+ errorCh <- token.Error()
+ return
+ }
+
+ <-doneCh
+ token := client.Unsubscribe(mqttEventSource.Topic)
+ if token.Error() != nil {
+ logger.WithError(token.Error()).Error("failed to unsubscribe client")
+ }
+}
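
A hypothetical minimal subscriber built from the same paho calls the listener uses (`NewClientOptions().AddBroker().SetClientID()`, `Connect`, `Subscribe`, `Unsubscribe`); broker URL, client id and topic are placeholders, and payloads are printed instead of being forwarded on the gateway's data channel.

```go
package main

import (
	"fmt"
	"os"
	"os/signal"

	mqttlib "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	// Broker URL, client id and topic are placeholders.
	opts := mqttlib.NewClientOptions().AddBroker("tcp://mqtt.argo-events:1883").SetClientID("example-client")
	client := mqttlib.NewClient(opts)

	if token := client.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}

	handler := func(c mqttlib.Client, msg mqttlib.Message) {
		fmt.Printf("topic=%s payload=%s\n", msg.Topic(), msg.Payload())
	}
	if token := client.Subscribe("example-topic", 0, handler); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}

	// Block until interrupted, then drop the subscription the way the
	// listener does when doneCh is closed.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	<-stop
	if token := client.Unsubscribe("example-topic"); token.Wait() && token.Error() != nil {
		fmt.Println("failed to unsubscribe:", token.Error())
	}
}
```
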
diff --git a/gateways/server/mqtt/validate.go b/gateways/server/mqtt/validate.go
new file mode 100644
index 0000000000..d1b0eb5c78
--- /dev/null
+++ b/gateways/server/mqtt/validate.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mqtt
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates mqtt event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.MQTTEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.MQTTEvent)),
+ }, nil
+ }
+
+ var mqttEventSource *v1alpha1.MQTTEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &mqttEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(mqttEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to validate mqtt event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.MQTTEventSource) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if eventSource.URL == "" {
+ return fmt.Errorf("url must be specified")
+ }
+ if eventSource.Topic == "" {
+ return fmt.Errorf("topic must be specified")
+ }
+ if eventSource.ClientId == "" {
+ return fmt.Errorf("client id must be specified")
+ }
+ return nil
+}
diff --git a/gateways/server/mqtt/validate_test.go b/gateways/server/mqtt/validate_test.go
new file mode 100644
index 0000000000..47d96bad0d
--- /dev/null
+++ b/gateways/server/mqtt/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mqtt
+
+import (
+ "context"
+ "fmt"
+ "github.com/stretchr/testify/assert"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+func TestValidateMqttEventSource(t *testing.T) {
+ listener := &EventListener{Logger: common.NewArgoEventsLogger()}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "mqtt",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("mqtt"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "mqtt.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.MQTT {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "mqtt",
+ Value: content,
+ Type: "mqtt",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/core/stream/nats/Dockerfile b/gateways/server/nats/Dockerfile
similarity index 100%
rename from gateways/core/stream/nats/Dockerfile
rename to gateways/server/nats/Dockerfile
diff --git a/gateways/server/nats/cmd/main.go b/gateways/server/nats/cmd/main.go
new file mode 100644
index 0000000000..75f311a647
--- /dev/null
+++ b/gateways/server/nats/cmd/main.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/nats"
+)
+
+func main() {
+ server.StartGateway(&nats.EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ })
+}
diff --git a/gateways/server/nats/start.go b/gateways/server/nats/start.go
new file mode 100644
index 0000000000..6d00211fc1
--- /dev/null
+++ b/gateways/server/nats/start.go
@@ -0,0 +1,109 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nats
+
+import (
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ natslib "github.com/nats-io/go-nats"
+ "github.com/sirupsen/logrus"
+ "k8s.io/apimachinery/pkg/util/wait"
+)
+
+// EventListener implements Eventing for nats event source
+type EventListener struct {
+ // Logger to log stuff
+ Logger *logrus.Logger
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("started processing the event source...")
+
+ dataCh := make(chan []byte)
+ errorCh := make(chan error)
+ doneCh := make(chan struct{}, 1)
+
+ go listener.listenEvents(eventSource, dataCh, errorCh, doneCh)
+
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
+}
+
+// listenEvents listens to events from the nats cluster
+func (listener *EventListener) listenEvents(eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+ defer server.Recover(eventSource.Name)
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ logger.Infoln("parsing the event source...")
+ var natsEventSource *v1alpha1.NATSEventsSource
+ if err := yaml.Unmarshal(eventSource.Value, &natsEventSource); err != nil {
+ errorCh <- err
+ return
+ }
+
+ logger = logger.WithFields(
+ map[string]interface{}{
+ common.LabelEventSource: eventSource.Name,
+ common.LabelURL: natsEventSource.URL,
+ "subject": natsEventSource.Subject,
+ },
+ )
+
+ var conn *natslib.Conn
+
+ logger.Infoln("connecting to nats cluster...")
+ if err := server.Connect(&wait.Backoff{
+ Steps: natsEventSource.ConnectionBackoff.Steps,
+ Jitter: natsEventSource.ConnectionBackoff.Jitter,
+ Duration: natsEventSource.ConnectionBackoff.Duration,
+ Factor: natsEventSource.ConnectionBackoff.Factor,
+ }, func() error {
+ var err error
+ if conn, err = natslib.Connect(natsEventSource.URL); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ logger.WithError(err).Error("failed to connect to nats cluster")
+ errorCh <- err
+ return
+ }
+
+ logger.Info("subscribing to messages on the queue...")
+ _, err := conn.Subscribe(natsEventSource.Subject, func(msg *natslib.Msg) {
+ logger.Infoln("dispatching event on data channel...")
+ dataCh <- msg.Data
+ })
+
+ if err != nil {
+ logger.WithError(err).Error("failed to subscribe")
+ errorCh <- err
+ return
+ }
+
+ conn.Flush()
+ if err := conn.LastError(); err != nil {
+ errorCh <- err
+ return
+ }
+
+ <-doneCh
+}
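
A hypothetical minimal subscriber built from the same go-nats calls the listener uses (`Connect`, `Subscribe`, `Flush`, `LastError`); URL and subject are placeholders, and payloads are printed instead of being forwarded on the gateway's data channel.

```go
package main

import (
	"fmt"
	"os"
	"os/signal"

	natslib "github.com/nats-io/go-nats"
)

func main() {
	// URL and subject are placeholders.
	conn, err := natslib.Connect("nats://nats.argo-events:4222")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	if _, err := conn.Subscribe("example-subject", func(msg *natslib.Msg) {
		fmt.Printf("subject=%s data=%s\n", msg.Subject, msg.Data)
	}); err != nil {
		panic(err)
	}

	// Flush pushes the subscription to the server; LastError surfaces any
	// protocol error, mirroring the checks in listenEvents above.
	conn.Flush()
	if err := conn.LastError(); err != nil {
		panic(err)
	}

	// Keep the subscription alive until interrupted.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	<-stop
}
```
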
diff --git a/gateways/server/nats/validate.go b/gateways/server/nats/validate.go
new file mode 100644
index 0000000000..2e11bcd677
--- /dev/null
+++ b/gateways/server/nats/validate.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nats
+
+import (
+ "context"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+)
+
+// ValidateEventSource validates nats event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.NATSEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.NATSEvent)),
+ }, nil
+ }
+
+ var natsEventSource *v1alpha1.NATSEventsSource
+ if err := yaml.Unmarshal(eventSource.Value, &natsEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(natsEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to validate nats event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.NATSEventsSource) error {
+ if eventSource == nil {
+ return errors.New("configuration must be non empty")
+ }
+ if eventSource.URL == "" {
+ return errors.New("url must be specified")
+ }
+ if eventSource.Subject == "" {
+ return errors.New("subject must be specified")
+ }
+ return nil
+}
diff --git a/gateways/server/nats/validate_test.go b/gateways/server/nats/validate_test.go
new file mode 100644
index 0000000000..105588d0f8
--- /dev/null
+++ b/gateways/server/nats/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nats
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValidateNatsEventSource(t *testing.T) {
+ listener := &EventListener{Logger: common.NewArgoEventsLogger()}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "nats",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("nats"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "nats.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.NATS {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "nats",
+ Value: content,
+ Type: "nats",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/core/resource/Dockerfile b/gateways/server/resource/Dockerfile
similarity index 100%
rename from gateways/core/resource/Dockerfile
rename to gateways/server/resource/Dockerfile
diff --git a/gateways/core/resource/cmd/main.go b/gateways/server/resource/cmd/main.go
similarity index 79%
rename from gateways/core/resource/cmd/main.go
rename to gateways/server/resource/cmd/main.go
index 8ca6c1d882..242e3b876e 100644
--- a/gateways/core/resource/cmd/main.go
+++ b/gateways/server/resource/cmd/main.go
@@ -20,8 +20,8 @@ import (
"os"
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/core/resource"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/resource"
)
func main() {
@@ -30,8 +30,8 @@ func main() {
if err != nil {
panic(err)
}
- gateways.StartGateway(&resource.ResourceEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
+ server.StartGateway(&resource.EventListener{
+ Logger: common.NewArgoEventsLogger(),
K8RestConfig: rest,
})
}
diff --git a/gateways/core/resource/start.go b/gateways/server/resource/start.go
similarity index 62%
rename from gateways/core/resource/start.go
rename to gateways/server/resource/start.go
index 333d4b7f6f..b5e1de18fc 100644
--- a/gateways/core/resource/start.go
+++ b/gateways/server/resource/start.go
@@ -23,7 +23,11 @@ import (
"github.com/argoproj/argo-events/common"
"github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
@@ -32,53 +36,73 @@ import (
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/dynamicinformer"
+ "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
)
-// StartEventSource starts an event source
-func (executor *ResourceEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- log := executor.Log.WithField(common.LabelEventSource, eventSource.Name)
- log.Info("operating on event source")
+// InformerEvent holds event generated from resource state change
+type InformerEvent struct {
+ Obj interface{}
+ OldObj interface{}
+ Type v1alpha1.ResourceEventType
+}
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
- return err
- }
+// EventListener implements Eventing
+type EventListener struct {
+ // Logger to log stuff
+ Logger *logrus.Logger
+ // K8RestConfig is kubernetes cluster config
+ K8RestConfig *rest.Config
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ listener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln("activating the event source...")
dataCh := make(chan []byte)
errorCh := make(chan error)
doneCh := make(chan struct{}, 1)
- go executor.listenEvents(config.(*resource), eventSource, dataCh, errorCh, doneCh)
+ go listener.listenEvents(eventSource, dataCh, errorCh, doneCh)
- return gateways.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, executor.Log)
+ return server.HandleEventsFromEventSource(eventSource.Name, eventStream, dataCh, errorCh, doneCh, listener.Logger)
}
// listenEvents watches resource updates and consume those events
-func (executor *ResourceEventSourceExecutor) listenEvents(resourceCfg *resource, eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
- defer gateways.Recover(eventSource.Name)
+func (listener *EventListener) listenEvents(eventSource *gateways.EventSource, dataCh chan []byte, errorCh chan error, doneCh chan struct{}) {
+ defer server.Recover(eventSource.Name)
+
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ logger.Infoln("started processing the event source...")
- executor.Log.WithField(common.LabelEventSource, eventSource.Name).Info("started listening resource notifications")
+ logger.Infoln("parsing resource event source...")
+ var resourceEventSource *v1alpha1.ResourceEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &resourceEventSource); err != nil {
+ errorCh <- err
+ return
+ }
- client, err := dynamic.NewForConfig(executor.K8RestConfig)
+ logger.Infoln("setting up a K8s client")
+ client, err := dynamic.NewForConfig(listener.K8RestConfig)
if err != nil {
errorCh <- err
return
}
gvr := schema.GroupVersionResource{
- Group: resourceCfg.Group,
- Version: resourceCfg.Version,
- Resource: resourceCfg.Resource,
+ Group: resourceEventSource.Group,
+ Version: resourceEventSource.Version,
+ Resource: resourceEventSource.Resource,
}
client.Resource(gvr)
options := &metav1.ListOptions{}
- if resourceCfg.Filter != nil && resourceCfg.Filter.Labels != nil {
- sel, err := LabelSelector(resourceCfg.Filter.Labels)
+ logger.Infoln("configuring label selectors if filters are selected...")
+ if resourceEventSource.Filter != nil && resourceEventSource.Filter.Labels != nil {
+ sel, err := LabelSelector(resourceEventSource.Filter.Labels)
if err != nil {
errorCh <- err
return
@@ -86,8 +110,8 @@ func (executor *ResourceEventSourceExecutor) listenEvents(resourceCfg *resource,
options.LabelSelector = sel.String()
}
- if resourceCfg.Filter != nil && resourceCfg.Filter.Fields != nil {
- sel, err := LabelSelector(resourceCfg.Filter.Fields)
+ if resourceEventSource.Filter != nil && resourceEventSource.Filter.Fields != nil {
+ sel, err := LabelSelector(resourceEventSource.Filter.Fields)
if err != nil {
errorCh <- err
return
@@ -99,13 +123,15 @@ func (executor *ResourceEventSourceExecutor) listenEvents(resourceCfg *resource,
op = options
}
- factory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(client, 0, resourceCfg.Namespace, tweakListOptions)
+ logger.Infoln("setting up informer factory...")
+ factory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(client, 0, resourceEventSource.Namespace, tweakListOptions)
informer := factory.ForResource(gvr)
informerEventCh := make(chan *InformerEvent)
go func() {
+ logger.Infoln("listening to resource events...")
for {
select {
case event, ok := <-informerEventCh:
@@ -114,11 +140,11 @@ func (executor *ResourceEventSourceExecutor) listenEvents(resourceCfg *resource,
}
eventBody, err := json.Marshal(event)
if err != nil {
- executor.Log.WithField(common.LabelEventSource, eventSource.Name).WithError(err).Errorln("failed to parse event from resource informer")
+ logger.WithError(err).Errorln("failed to parse event from resource informer")
continue
}
- if err := passFilters(event.Obj.(*unstructured.Unstructured), resourceCfg.Filter); err != nil {
- executor.Log.WithField(common.LabelEventSource, eventSource.Name).WithError(err).Warnln("failed to apply the filter")
+ if err := passFilters(event.Obj.(*unstructured.Unstructured), resourceEventSource.Filter); err != nil {
+ logger.WithError(err).Warnln("failed to apply the filter")
continue
}
dataCh <- eventBody
@@ -132,27 +158,27 @@ func (executor *ResourceEventSourceExecutor) listenEvents(resourceCfg *resource,
AddFunc: func(obj interface{}) {
informerEventCh <- &InformerEvent{
Obj: obj,
- Type: ADD,
+ Type: v1alpha1.ADD,
}
},
UpdateFunc: func(oldObj, newObj interface{}) {
informerEventCh <- &InformerEvent{
Obj: newObj,
OldObj: oldObj,
- Type: UPDATE,
+ Type: v1alpha1.UPDATE,
}
},
DeleteFunc: func(obj interface{}) {
informerEventCh <- &InformerEvent{
Obj: obj,
- Type: DELETE,
+ Type: v1alpha1.DELETE,
}
},
},
)
sharedInformer.Run(doneCh)
- executor.Log.WithField(common.LabelEventSource, eventSource.Name).Infoln("resource informer is stopped")
+ logger.Infoln("resource informer is stopped")
close(informerEventCh)
close(doneCh)
}
@@ -193,7 +219,7 @@ func FieldSelector(fieldSelectors map[string]string) (fields.Selector, error) {
}
// helper method to check if the object passed the user defined filters
-func passFilters(obj *unstructured.Unstructured, filter *ResourceFilter) error {
+func passFilters(obj *unstructured.Unstructured, filter *v1alpha1.ResourceFilter) error {
// no filters are applied.
if filter == nil {
return nil
diff --git a/gateways/core/resource/start_test.go b/gateways/server/resource/start_test.go
similarity index 72%
rename from gateways/core/resource/start_test.go
rename to gateways/server/resource/start_test.go
index f0c06d0b9a..0ece326906 100644
--- a/gateways/core/resource/start_test.go
+++ b/gateways/server/resource/start_test.go
@@ -17,19 +17,33 @@ limitations under the License.
package resource
import (
+ "testing"
+
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
"github.com/mitchellh/mapstructure"
"github.com/smartystreets/goconvey/convey"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/kubernetes/fake"
- "testing"
)
func TestFilter(t *testing.T) {
convey.Convey("Given a resource object, apply filter on it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
+ resourceEventSource := &v1alpha1.ResourceEventSource{
+ Namespace: "fake",
+ GroupVersionResource: metav1.GroupVersionResource{
+ Group: "",
+ Resource: "pods",
+ Version: "v1",
+ },
+ Filter: &v1alpha1.ResourceFilter{
+ Labels: map[string]string{
+ "workflows.argoproj.io/phase": "Succeeded",
+ "name": "my-workflow",
+ },
+ },
+ }
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "fake",
@@ -40,7 +54,7 @@ func TestFilter(t *testing.T) {
},
},
}
- pod, err = fake.NewSimpleClientset().CoreV1().Pods("fake").Create(pod)
+ pod, err := fake.NewSimpleClientset().CoreV1().Pods("fake").Create(pod)
convey.So(err, convey.ShouldBeNil)
outmap := make(map[string]interface{})
@@ -49,7 +63,7 @@ func TestFilter(t *testing.T) {
err = passFilters(&unstructured.Unstructured{
Object: outmap,
- }, ps.(*resource).Filter)
+ }, resourceEventSource.Filter)
convey.So(err, convey.ShouldBeNil)
})
}
diff --git a/gateways/server/resource/validate.go b/gateways/server/resource/validate.go
new file mode 100644
index 0000000000..727dec6652
--- /dev/null
+++ b/gateways/server/resource/validate.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates a resource event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.ResourceEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.ResourceEvent)),
+ }, nil
+ }
+
+ var resourceEventSource *v1alpha1.ResourceEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &resourceEventSource); err != nil {
+ listener.Logger.WithError(err).Errorln("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, err
+ }
+
+ if err := validate(resourceEventSource); err != nil {
+ listener.Logger.WithError(err).Errorln("failed to validate the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, err
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.ResourceEventSource) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if eventSource.Version == "" {
+ return fmt.Errorf("version must be specified")
+ }
+ if eventSource.Resource == "" {
+ return fmt.Errorf("resource must be specified")
+ }
+ return nil
+}
diff --git a/gateways/server/resource/validate_test.go b/gateways/server/resource/validate_test.go
new file mode 100644
index 0000000000..ea6e87cddb
--- /dev/null
+++ b/gateways/server/resource/validate_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEventListener_ValidateEventSource(t *testing.T) {
+ listener := &EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ }
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "resource",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("resource"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "resource.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.Resource {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "resource",
+ Value: content,
+ Type: "resource",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/gateway.go b/gateways/server/server.go
similarity index 80%
rename from gateways/gateway.go
rename to gateways/server/server.go
index e29c6d9b3e..dc7bc407d2 100644
--- a/gateways/gateway.go
+++ b/gateways/server/server.go
@@ -14,20 +14,21 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package gateways
+package server
import (
"fmt"
+ "net"
+ "os"
+
"github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
- "net"
- "os"
- "runtime/debug"
)
// StartGateway start a gateway
-func StartGateway(es EventingServer) {
+func StartGateway(es gateways.EventingServer) {
port, ok := os.LookupEnv(common.EnvVarGatewayServerPort)
if !ok {
panic(fmt.Errorf("port is not provided"))
@@ -37,7 +38,7 @@ func StartGateway(es EventingServer) {
panic(err)
}
srv := grpc.NewServer()
- RegisterEventingServer(srv, es)
+ gateways.RegisterEventingServer(srv, es)
fmt.Println("starting gateway server")
@@ -50,17 +51,16 @@ func StartGateway(es EventingServer) {
func Recover(eventSource string) {
if r := recover(); r != nil {
fmt.Printf("recovered event source %s from error. recover: %v", eventSource, r)
- debug.PrintStack()
}
}
// HandleEventsFromEventSource handles events from the event source.
-func HandleEventsFromEventSource(name string, eventStream Eventing_StartEventSourceServer, dataCh chan []byte, errorCh chan error, doneCh chan struct{}, log *logrus.Logger) error {
+func HandleEventsFromEventSource(name string, eventStream gateways.Eventing_StartEventSourceServer, dataCh chan []byte, errorCh chan error, doneCh chan struct{}, log *logrus.Logger) error {
for {
select {
case data := <-dataCh:
log.WithField(common.LabelEventSource, name).Info("new event received, dispatching to gateway client")
- err := eventStream.Send(&Event{
+ err := eventStream.Send(&gateways.Event{
Name: name,
Payload: data,
})
@@ -69,7 +69,7 @@ func HandleEventsFromEventSource(name string, eventStream Eventing_StartEventSou
}
case err := <-errorCh:
- log.WithField(common.LabelEventSource, name).WithError(err).Error("error occurred while getting event from event source")
+ log.WithField(common.LabelEventSource, name).WithError(err).Error("error occurred while processing the event source")
return err
case <-eventStream.Context().Done():
diff --git a/gateways/gateway_test.go b/gateways/server/server_test.go
similarity index 93%
rename from gateways/gateway_test.go
rename to gateways/server/server_test.go
index f27e59669f..508ea74e86 100644
--- a/gateways/gateway_test.go
+++ b/gateways/server/server_test.go
@@ -14,24 +14,26 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package gateways
+package server
import (
"context"
"fmt"
+ "testing"
+ "time"
+
"github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
"github.com/smartystreets/goconvey/convey"
"google.golang.org/grpc/metadata"
- "testing"
- "time"
)
type FakeGRPCStream struct {
- SentData *Event
+ SentData *gateways.Event
Ctx context.Context
}
-func (f *FakeGRPCStream) Send(event *Event) error {
+func (f *FakeGRPCStream) Send(event *gateways.Event) error {
f.SentData = event
return nil
}
diff --git a/gateways/community/slack/Dockerfile b/gateways/server/slack/Dockerfile
similarity index 100%
rename from gateways/community/slack/Dockerfile
rename to gateways/server/slack/Dockerfile
diff --git a/gateways/server/slack/cmd/main.go b/gateways/server/slack/cmd/main.go
new file mode 100644
index 0000000000..07ba88efb4
--- /dev/null
+++ b/gateways/server/slack/cmd/main.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "os"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/slack"
+ "k8s.io/client-go/kubernetes"
+)
+
+func main() {
+ kubeConfig, _ := os.LookupEnv(common.EnvVarKubeConfig)
+ restConfig, err := common.GetClientConfig(kubeConfig)
+ if err != nil {
+ panic(err)
+ }
+ clientset := kubernetes.NewForConfigOrDie(restConfig)
+ server.StartGateway(&slack.EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ K8sClient: clientset,
+ })
+}
diff --git a/gateways/community/slack/start.go b/gateways/server/slack/start.go
similarity index 54%
rename from gateways/community/slack/start.go
rename to gateways/server/slack/start.go
index 2e17250464..43cbbfdb00 100644
--- a/gateways/community/slack/start.go
+++ b/gateways/server/slack/start.go
@@ -24,49 +24,61 @@ import (
"github.com/argoproj/argo-events/common"
"github.com/argoproj/argo-events/gateways"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
"github.com/argoproj/argo-events/store"
+ "github.com/ghodss/yaml"
"github.com/nlopes/slack"
"github.com/nlopes/slack/slackevents"
"github.com/pkg/errors"
)
+// controller controls the webhook operations
var (
- helper = gwcommon.NewWebhookHelper()
+ controller = webhook.NewController()
)
+// set up the activation and inactivation channels to control the state of routes.
func init() {
- go gwcommon.InitRouteChannels(helper)
+ go webhook.ProcessRouteStatus(controller)
}
-func (rc *RouteConfig) GetRoute() *gwcommon.Route {
+// Implement Router
+// 1. GetRoute
+// 2. HandleRoute
+// 3. PostActivate
+// 4. PostInactivate
+
+// GetRoute returns the route
+func (rc *Router) GetRoute() *webhook.Route {
return rc.route
}
-// RouteHandler handles new route
-func (rc *RouteConfig) RouteHandler(writer http.ResponseWriter, request *http.Request) {
- r := rc.route
+// HandleRoute handles incoming requests on the route
+func (rc *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) {
+ route := rc.route
- log := r.Logger.WithFields(
+ logger := route.Logger.WithFields(
map[string]interface{}{
- common.LabelEventSource: r.EventSource.Name,
- common.LabelEndpoint: r.Webhook.Endpoint,
- common.LabelPort: r.Webhook.Port,
- common.LabelHTTPMethod: r.Webhook.Method,
+ common.LabelEventSource: route.EventSource.Name,
+ common.LabelEndpoint: route.Context.Endpoint,
+ common.LabelHTTPMethod: route.Context.Method,
})
- log.Info("request received")
+ logger.Info("request a received, processing it...")
- if !helper.ActiveEndpoints[r.Webhook.Endpoint].Active {
- log.Warn("endpoint is not active")
- common.SendErrorResponse(writer, "")
+ if !route.Active {
+ logger.Warn("endpoint is not active, won't process it")
+ common.SendErrorResponse(writer, "endpoint is inactive")
return
}
+ logger.Infoln("verifying the request...")
err := rc.verifyRequest(request)
if err != nil {
- log.WithError(err).Error("Failed validating request")
- common.SendInternalErrorResponse(writer, "")
+ logger.WithError(err).Error("failed to validate the request")
+ common.SendInternalErrorResponse(writer, err.Error())
return
}
@@ -75,40 +87,54 @@ func (rc *RouteConfig) RouteHandler(writer http.ResponseWriter, request *http.Re
// sent as application/x-www-form-urlencoded
// If request was generated by an interactive element, it will be a POST form
if len(request.Header["Content-Type"]) > 0 && request.Header["Content-Type"][0] == "application/x-www-form-urlencoded" {
+ logger.Infoln("handling slack interaction...")
data, err = rc.handleInteraction(request)
if err != nil {
- log.WithError(err).Error("Failed processing interaction")
- common.SendInternalErrorResponse(writer, "")
+ logger.WithError(err).Error("failed to process the interaction")
+ common.SendInternalErrorResponse(writer, err.Error())
return
}
} else {
// If there's no payload in the post body, this is likely an
// Event API request. Parse and process if valid.
+ logger.Infoln("handling slack event...")
var response []byte
data, response, err = rc.handleEvent(request)
if err != nil {
- log.WithError(err).Error("Failed processing event")
- common.SendInternalErrorResponse(writer, "")
+ logger.WithError(err).Error("failed to handle the event")
+ common.SendInternalErrorResponse(writer, err.Error())
return
}
if response != nil {
writer.Header().Set("Content-Type", "text")
if _, err := writer.Write(response); err != nil {
- log.WithError(err).Error("failed to write the response for url verification")
+ logger.WithError(err).Error("failed to write the response for url verification")
// don't return, we want to keep this running to give user chance to retry
}
}
}
if data != nil {
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint].DataCh <- data
+ logger.Infoln("dispatching event on route's data channel...")
+ route.DataCh <- data
}
- log.Info("request successfully processed")
- common.SendSuccessResponse(writer, "")
+ logger.Info("request successfully processed")
+ common.SendSuccessResponse(writer, "success")
}
-func (rc *RouteConfig) handleEvent(request *http.Request) ([]byte, []byte, error) {
+// PostActivate performs operations once the route is activated and ready to consume requests
+func (rc *Router) PostActivate() error {
+ return nil
+}
+
+// PostInactivate performs operations after the route is inactivated
+func (rc *Router) PostInactivate() error {
+ return nil
+}
+
+// handleEvent parses the slack notification and validates the event type
+func (rc *Router) handleEvent(request *http.Request) ([]byte, []byte, error) {
var err error
var response []byte
var data []byte
@@ -141,7 +167,7 @@ func (rc *RouteConfig) handleEvent(request *http.Request) ([]byte, []byte, error
return data, response, nil
}
-func (rc *RouteConfig) handleInteraction(request *http.Request) ([]byte, error) {
+func (rc *Router) handleInteraction(request *http.Request) ([]byte, error) {
var err error
err = request.ParseForm()
if err != nil {
@@ -178,7 +204,7 @@ func getRequestBody(request *http.Request) ([]byte, error) {
// X-Slack-Signature header value.
// The signature is a hash generated as per Slack documentation at:
// https://api.slack.com/docs/verifying-requests-from-slack
-func (rc *RouteConfig) verifyRequest(request *http.Request) error {
+func (rc *Router) verifyRequest(request *http.Request) error {
signingSecret := rc.signingSecret
if len(signingSecret) > 0 {
sv, err := slack.NewSecretsVerifier(request.Header, signingSecret)
@@ -205,52 +231,43 @@ func (rc *RouteConfig) verifyRequest(request *http.Request) error {
return nil
}
-func (rc *RouteConfig) PostStart() error {
- return nil
-}
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ defer server.Recover(eventSource.Name)
-func (rc *RouteConfig) PostStop() error {
- return nil
-}
+ logger := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
-// StartEventSource starts a event source
-func (ese *SlackEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
- defer gateways.Recover(eventSource.Name)
+ logger.Infoln("started processing the event source...")
- log := ese.Log.WithField(common.LabelEventSource, eventSource.Name)
- log.Info("operating on event source")
+ logger.Infoln("parsing slack event source...")
- config, err := parseEventSource(eventSource.Data)
- if err != nil {
- log.WithError(err).Error("failed to parse event source")
+ var slackEventSource *v1alpha1.SlackEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &slackEventSource); err != nil {
+ logger.WithError(err).Errorln("failed to parse the event source")
return err
}
- ses := config.(*slackEventSource)
-
- token, err := store.GetSecrets(ese.Clientset, ese.Namespace, ses.Token.Name, ses.Token.Key)
+ logger.Infoln("retrieving the slack token...")
+ token, err := store.GetSecrets(listener.K8sClient, slackEventSource.Namespace, slackEventSource.Token.Name, slackEventSource.Token.Key)
if err != nil {
- log.WithError(err).Error("failed to retrieve token")
+ logger.WithError(err).Error("failed to retrieve the token")
return err
}
- signingSecret, err := store.GetSecrets(ese.Clientset, ese.Namespace, ses.SigningSecret.Name, ses.SigningSecret.Key)
+ logger.Infoln("retrieving the signing secret...")
+ signingSecret, err := store.GetSecrets(listener.K8sClient, slackEventSource.Namespace, slackEventSource.SigningSecret.Name, slackEventSource.SigningSecret.Key)
if err != nil {
- log.WithError(err).Warn("Signing secret not provided. Signature not validated.")
- signingSecret = ""
+ logger.WithError(err).Warn("failed to retrieve the signing secret")
+ return err
}
- return gwcommon.ProcessRoute(&RouteConfig{
- route: &gwcommon.Route{
- Logger: ese.Log,
- StartCh: make(chan struct{}),
- Webhook: ses.Hook,
- EventSource: eventSource,
- },
- token: token,
- signingSecret: signingSecret,
- clientset: ese.Clientset,
- namespace: ese.Namespace,
- ses: ses,
- }, helper, eventStream)
+ route := webhook.NewRoute(slackEventSource.Webhook, listener.Logger, eventSource)
+
+ return webhook.ManageRoute(&Router{
+ route: route,
+ token: token,
+ signingSecret: signingSecret,
+ k8sClient: listener.K8sClient,
+ slackEventSource: slackEventSource,
+ }, controller, eventStream)
}
diff --git a/gateways/community/slack/start_test.go b/gateways/server/slack/start_test.go
similarity index 75%
rename from gateways/community/slack/start_test.go
rename to gateways/server/slack/start_test.go
index 285f10e9cd..cc555760cb 100644
--- a/gateways/community/slack/start_test.go
+++ b/gateways/server/slack/start_test.go
@@ -29,7 +29,8 @@ import (
"testing"
"time"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
"github.com/ghodss/yaml"
"github.com/nlopes/slack/slackevents"
"github.com/smartystreets/goconvey/convey"
@@ -38,27 +39,25 @@ import (
func TestRouteActiveHandler(t *testing.T) {
convey.Convey("Given a route configuration", t, func() {
- rc := &RouteConfig{
- route: gwcommon.GetFakeRoute(),
- clientset: fake.NewSimpleClientset(),
- namespace: "fake",
- }
-
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint] = &gwcommon.Endpoint{
- DataCh: make(chan []byte),
+ router := &Router{
+ route: webhook.GetFakeRoute(),
+ k8sClient: fake.NewSimpleClientset(),
+ slackEventSource: &v1alpha1.SlackEventSource{
+ Namespace: "fake",
+ },
}
convey.Convey("Inactive route should return 404", func() {
- writer := &gwcommon.FakeHttpWriter{}
- rc.RouteHandler(writer, &http.Request{})
+ writer := &webhook.FakeHttpWriter{}
+ router.HandleRoute(writer, &http.Request{})
convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest)
})
- rc.token = "Jhj5dZrVaK7ZwHHjRyZWjbDl"
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint].Active = true
+ router.token = "Jhj5dZrVaK7ZwHHjRyZWjbDl"
+ router.route.Active = true
convey.Convey("Test url verification request", func() {
- writer := &gwcommon.FakeHttpWriter{}
+ writer := &webhook.FakeHttpWriter{}
urlVer := slackevents.EventsAPIURLVerificationEvent{
Type: slackevents.URLVerification,
Token: "Jhj5dZrVaK7ZwHHjRyZWjbDl",
@@ -67,7 +66,7 @@ func TestRouteActiveHandler(t *testing.T) {
payload, err := yaml.Marshal(urlVer)
convey.So(err, convey.ShouldBeNil)
convey.So(payload, convey.ShouldNotBeNil)
- rc.RouteHandler(writer, &http.Request{
+ router.HandleRoute(writer, &http.Request{
Body: ioutil.NopCloser(bytes.NewReader(payload)),
})
convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusInternalServerError)
@@ -78,20 +77,22 @@ func TestRouteActiveHandler(t *testing.T) {
func TestSlackSignature(t *testing.T) {
convey.Convey("Given a route that receives a message from Slack", t, func() {
- rc := &RouteConfig{
- route: gwcommon.GetFakeRoute(),
- clientset: fake.NewSimpleClientset(),
- namespace: "fake",
+ router := &Router{
+ route: webhook.GetFakeRoute(),
+ k8sClient: fake.NewSimpleClientset(),
+ slackEventSource: &v1alpha1.SlackEventSource{
+ Namespace: "fake",
+ },
}
- rc.signingSecret = "abcdefghiklm1234567890"
+ router.signingSecret = "abcdefghiklm1234567890"
convey.Convey("Validate request signature", func() {
- writer := &gwcommon.FakeHttpWriter{}
+ writer := &webhook.FakeHttpWriter{}
payload := []byte("payload=%7B%22type%22%3A%22block_actions%22%2C%22team%22%3A%7B%22id%22%3A%22T0CAG%22%2C%22domain%22%3A%22acme-creamery%22%7D%2C%22user%22%3A%7B%22id%22%3A%22U0CA5%22%2C%22username%22%3A%22Amy%20McGee%22%2C%22name%22%3A%22Amy%20McGee%22%2C%22team_id%22%3A%22T3MDE%22%7D%2C%22api_app_id%22%3A%22A0CA5%22%2C%22token%22%3A%22Shh_its_a_seekrit%22%2C%22container%22%3A%7B%22type%22%3A%22message%22%2C%22text%22%3A%22The%20contents%20of%20the%20original%20message%20where%20the%20action%20originated%22%7D%2C%22trigger_id%22%3A%2212466734323.1395872398%22%2C%22response_url%22%3A%22https%3A%2F%2Fwww.postresponsestome.com%2FT123567%2F1509734234%22%2C%22actions%22%3A%5B%7B%22type%22%3A%22button%22%2C%22block_id%22%3A%22actionblock789%22%2C%22action_id%22%3A%2227S%22%2C%22text%22%3A%7B%22type%22%3A%22plain_text%22%2C%22text%22%3A%22Link%20Button%22%2C%22emoji%22%3Atrue%7D%2C%22action_ts%22%3A%221564701248.149432%22%7D%5D%7D")
h := make(http.Header)
rts := int(time.Now().UTC().UnixNano())
- hmac := hmac.New(sha256.New, []byte(rc.signingSecret))
+ hmac := hmac.New(sha256.New, []byte(router.signingSecret))
b := strings.Join([]string{"v0", strconv.Itoa(rts), string(payload)}, ":")
hmac.Write([]byte(b))
hash := hex.EncodeToString(hmac.Sum(nil))
@@ -100,16 +101,13 @@ func TestSlackSignature(t *testing.T) {
h.Add("X-Slack-Signature", genSig)
h.Add("X-Slack-Request-Timestamp", strconv.FormatInt(int64(rts), 10))
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint] = &gwcommon.Endpoint{
- DataCh: make(chan []byte),
- }
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint].Active = true
+ router.route.Active = true
go func() {
- <-helper.ActiveEndpoints[rc.route.Webhook.Endpoint].DataCh
+ <-router.route.DataCh
}()
- rc.RouteHandler(writer, &http.Request{
+ router.HandleRoute(writer, &http.Request{
Body: ioutil.NopCloser(bytes.NewReader(payload)),
Header: h,
Method: "POST",
@@ -122,25 +120,23 @@ func TestSlackSignature(t *testing.T) {
func TestInteractionHandler(t *testing.T) {
convey.Convey("Given a route that receives an interaction event", t, func() {
- rc := &RouteConfig{
- route: gwcommon.GetFakeRoute(),
- clientset: fake.NewSimpleClientset(),
- namespace: "fake",
+ router := &Router{
+ route: webhook.GetFakeRoute(),
+ k8sClient: fake.NewSimpleClientset(),
+ slackEventSource: &v1alpha1.SlackEventSource{
+ Namespace: "fake",
+ },
}
convey.Convey("Test an interaction action message", func() {
- writer := &gwcommon.FakeHttpWriter{}
+ writer := &webhook.FakeHttpWriter{}
actionString := `{"type":"block_actions","team":{"id":"T9TK3CUKW","domain":"example"},"user":{"id":"UA8RXUSPL","username":"jtorrance","team_id":"T9TK3CUKW"},"api_app_id":"AABA1ABCD","token":"9s8d9as89d8as9d8as989","container":{"type":"message_attachment","message_ts":"1548261231.000200","attachment_id":1,"channel_id":"CBR2V3XEX","is_ephemeral":false,"is_app_unfurl":false},"trigger_id":"12321423423.333649436676.d8c1bb837935619ccad0f624c448ffb3","channel":{"id":"CBR2V3XEX","name":"review-updates"},"message":{"bot_id":"BAH5CA16Z","type":"message","text":"This content can't be displayed.","user":"UAJ2RU415","ts":"1548261231.000200"},"response_url":"https://hooks.slack.com/actions/AABA1ABCD/1232321423432/D09sSasdasdAS9091209","actions":[{"action_id":"WaXA","block_id":"=qXel","text":{"type":"plain_text","text":"View","emoji":true},"value":"click_me_123","type":"button","action_ts":"1548426417.840180"}]}`
payload := []byte(`payload=` + actionString)
out := make(chan []byte)
-
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint] = &gwcommon.Endpoint{
- DataCh: make(chan []byte),
- }
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint].Active = true
+ router.route.Active = true
go func() {
- out <- <-helper.ActiveEndpoints[rc.route.Webhook.Endpoint].DataCh
+ out <- <-router.route.DataCh
}()
var buf bytes.Buffer
@@ -148,7 +144,7 @@ func TestInteractionHandler(t *testing.T) {
headers := make(map[string][]string)
headers["Content-Type"] = append(headers["Content-Type"], "application/x-www-form-urlencoded")
- rc.RouteHandler(writer, &http.Request{
+ router.HandleRoute(writer, &http.Request{
Method: http.MethodPost,
Header: headers,
Body: ioutil.NopCloser(strings.NewReader(buf.String())),
@@ -164,14 +160,16 @@ func TestInteractionHandler(t *testing.T) {
func TestEventHandler(t *testing.T) {
convey.Convey("Given a route that receives an event", t, func() {
- rc := &RouteConfig{
- route: gwcommon.GetFakeRoute(),
- clientset: fake.NewSimpleClientset(),
- namespace: "fake",
+ router := &Router{
+ route: webhook.GetFakeRoute(),
+ k8sClient: fake.NewSimpleClientset(),
+ slackEventSource: &v1alpha1.SlackEventSource{
+ Namespace: "fake",
+ },
}
convey.Convey("Test an event notification", func() {
- writer := &gwcommon.FakeHttpWriter{}
+ writer := &webhook.FakeHttpWriter{}
event := []byte(`
{
"type": "name_of_event",
@@ -197,16 +195,13 @@ func TestEventHandler(t *testing.T) {
payload, err := yaml.Marshal(ce)
convey.So(err, convey.ShouldBeNil)
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint] = &gwcommon.Endpoint{
- DataCh: make(chan []byte),
- }
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint].Active = true
+ router.route.Active = true
go func() {
- <-helper.ActiveEndpoints[rc.route.Webhook.Endpoint].DataCh
+ <-router.route.DataCh
}()
- rc.RouteHandler(writer, &http.Request{
+ router.HandleRoute(writer, &http.Request{
Body: ioutil.NopCloser(bytes.NewBuffer(payload)),
})
convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusInternalServerError)
diff --git a/gateways/server/slack/types.go b/gateways/server/slack/types.go
new file mode 100644
index 0000000000..f171147682
--- /dev/null
+++ b/gateways/server/slack/types.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package slack
+
+import (
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/sirupsen/logrus"
+ "k8s.io/client-go/kubernetes"
+)
+
+// EventListener implements Eventing for slack event source
+type EventListener struct {
+ // K8sClient is the Kubernetes client
+ K8sClient kubernetes.Interface
+ // Logger is the logger for the event listener
+ Logger *logrus.Logger
+}
+
+// Router contains information about a REST endpoint
+type Router struct {
+ // route holds information to process an incoming request
+ route *webhook.Route
+ // slackEventSource holds the configuration required to consume events from Slack
+ slackEventSource *v1alpha1.SlackEventSource
+ // token is the slack token
+ token string
+ // refer to https://api.slack.com/docs/verifying-requests-from-slack
+ signingSecret string
+ // k8sClient is the Kubernetes client
+ k8sClient kubernetes.Interface
+}
diff --git a/gateways/server/slack/validate.go b/gateways/server/slack/validate.go
new file mode 100644
index 0000000000..2a4fb9b01c
--- /dev/null
+++ b/gateways/server/slack/validate.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package slack
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates slack event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.SlackEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.SlackEvent)),
+ }, nil
+ }
+
+ var slackEventSource *v1alpha1.SlackEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &slackEventSource); err != nil {
+ listener.Logger.WithError(err).Errorln("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(slackEventSource); err != nil {
+ listener.Logger.WithError(err).Errorln("failed to validate the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.SlackEventSource) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ if eventSource.Token == nil {
+ return fmt.Errorf("token not provided")
+ }
+ return webhook.ValidateWebhookContext(eventSource.Webhook)
+}
diff --git a/gateways/server/slack/validate_test.go b/gateways/server/slack/validate_test.go
new file mode 100644
index 0000000000..6337b98fd5
--- /dev/null
+++ b/gateways/server/slack/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package slack
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSlackEventSource(t *testing.T) {
+ listener := &EventListener{}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "slack",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("slack"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "slack.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.Slack {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "slack",
+ Value: content,
+ Type: "slack",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/community/storagegrid/Dockerfile b/gateways/server/storagegrid/Dockerfile
similarity index 100%
rename from gateways/community/storagegrid/Dockerfile
rename to gateways/server/storagegrid/Dockerfile
diff --git a/gateways/core/stream/kafka/cmd/main.go b/gateways/server/storagegrid/cmd/main.go
similarity index 76%
rename from gateways/core/stream/kafka/cmd/main.go
rename to gateways/server/storagegrid/cmd/main.go
index 865ae21208..59cf7c2a67 100644
--- a/gateways/core/stream/kafka/cmd/main.go
+++ b/gateways/server/storagegrid/cmd/main.go
@@ -18,12 +18,12 @@ package main
import (
"github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/core/stream/kafka"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/storagegrid"
)
func main() {
- gateways.StartGateway(&kafka.KafkaEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
+ server.StartGateway(&storagegrid.EventListener{
+ Logger: common.NewArgoEventsLogger(),
})
}
diff --git a/gateways/server/storagegrid/start.go b/gateways/server/storagegrid/start.go
new file mode 100644
index 0000000000..fccc71f642
--- /dev/null
+++ b/gateways/server/storagegrid/start.go
@@ -0,0 +1,199 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storagegrid
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/google/uuid"
+ "github.com/joncalhoun/qson"
+)
+
+// controller controls the webhook operations
+var (
+ controller = webhook.NewController()
+)
+
+var (
+ respBody = `
+<PublishResponse xmlns="http://argoevents-sns-server/">
+ <PublishResult>
+ <MessageId>` + generateUUID().String() + `</MessageId>
+ </PublishResult>
+ <ResponseMetadata>
+ <RequestId>` + generateUUID().String() + `</RequestId>
+ </ResponseMetadata>
+</PublishResponse>` + "\n"
+)
+
+// set up the activation and inactivation channels to control the state of routes.
+func init() {
+ go webhook.ProcessRouteStatus(controller)
+}
+
+// generateUUID returns a new uuid
+func generateUUID() uuid.UUID {
+ return uuid.New()
+}
+
+// filterEvent filters a notification based on the events configured in the event source
+func filterEvent(notification *storageGridNotification, eventSource *v1alpha1.StorageGridEventSource) bool {
+ if eventSource.Events == nil {
+ return true
+ }
+ for _, filterEvent := range eventSource.Events {
+ if notification.Message.Records[0].EventName == filterEvent {
+ return true
+ }
+ }
+ return false
+}
+
+// filterName filters object key based on configured prefix and/or suffix
+func filterName(notification *storageGridNotification, eventSource *v1alpha1.StorageGridEventSource) bool {
+ if eventSource.Filter == nil {
+ return true
+ }
+ if eventSource.Filter.Prefix != "" && eventSource.Filter.Suffix != "" {
+ return strings.HasPrefix(notification.Message.Records[0].S3.Object.Key, eventSource.Filter.Prefix) && strings.HasSuffix(notification.Message.Records[0].S3.Object.Key, eventSource.Filter.Suffix)
+ }
+ if eventSource.Filter.Prefix != "" {
+ return strings.HasPrefix(notification.Message.Records[0].S3.Object.Key, eventSource.Filter.Prefix)
+ }
+ if eventSource.Filter.Suffix != "" {
+ return strings.HasSuffix(notification.Message.Records[0].S3.Object.Key, eventSource.Filter.Suffix)
+ }
+ return true
+}
+
+// Implement Router
+// 1. GetRoute
+// 2. HandleRoute
+// 3. PostActivate
+// 4. PostInactivate
+
+// GetRoute returns the route
+func (router *Router) GetRoute() *webhook.Route {
+ return router.route
+}
+
+// HandleRoute handles incoming requests on the route
+func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) {
+ route := router.route
+
+ logger := route.Logger.WithFields(
+ map[string]interface{}{
+ common.LabelEventSource: route.EventSource.Name,
+ common.LabelEndpoint: route.Context.Endpoint,
+ common.LabelPort: route.Context.Port,
+ common.LabelHTTPMethod: route.Context.Method,
+ })
+
+ logger.Infoln("processing incoming request...")
+
+ if !route.Active {
+ logger.Warnln("endpoint is inactive, won't process the request")
+ common.SendErrorResponse(writer, "inactive endpoint")
+ return
+ }
+
+ logger.Infoln("parsing the request body...")
+ body, err := ioutil.ReadAll(request.Body)
+ if err != nil {
+ logger.WithError(err).Errorln("failed to parse request body")
+ common.SendErrorResponse(writer, "")
+ return
+ }
+
+ switch request.Method {
+ case http.MethodHead:
+ respBody = ""
+ }
+ writer.Header().Add("Content-Type", "text/plain")
+ writer.WriteHeader(http.StatusOK)
+ writer.Write([]byte(respBody))
+
+ // the notification received from storage grid is URL-encoded.
+ parsedURL, err := url.QueryUnescape(string(body))
+ if err != nil {
+ logger.WithError(err).Errorln("failed to unescape request body url")
+ return
+ }
+ b, err := qson.ToJSON(parsedURL)
+ if err != nil {
+ logger.WithError(err).Errorln("failed to convert request body in JSON format")
+ return
+ }
+
+ logger.Infoln("converting request body to storage grid notification")
+ var notification *storageGridNotification
+ err = json.Unmarshal(b, &notification)
+ if err != nil {
+ logger.WithError(err).Errorln("failed to convert the request body into storage grid notification")
+ return
+ }
+
+ if filterEvent(notification, router.storageGridEventSource) && filterName(notification, router.storageGridEventSource) {
+ logger.WithError(err).Errorln("new event received, dispatching event on route's data channel")
+ route.DataCh <- b
+ return
+ }
+
+ logger.Warnln("discarding notification since it did not pass all filters")
+}
+
+// PostActivate performs operations once the route is activated and ready to consume requests
+func (router *Router) PostActivate() error {
+ return nil
+}
+
+// PostInactivate performs operations after the route is inactivated
+func (router *Router) PostInactivate() error {
+ return nil
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ defer server.Recover(eventSource.Name)
+
+ log := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ log.Info("started processing the event source...")
+
+ var storagegridEventSource *v1alpha1.StorageGridEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &storagegridEventSource); err != nil {
+ log.WithError(err).Errorln("failed to parse the event source")
+ return err
+ }
+
+ route := webhook.NewRoute(storagegridEventSource.Webhook, listener.Logger, eventSource)
+
+ return webhook.ManageRoute(&Router{
+ route: route,
+ storageGridEventSource: storagegridEventSource,
+ }, controller, eventStream)
+}
diff --git a/gateways/community/storagegrid/start_test.go b/gateways/server/storagegrid/start_test.go
similarity index 61%
rename from gateways/community/storagegrid/start_test.go
rename to gateways/server/storagegrid/start_test.go
index e0d7e040e5..bc0031d86d 100644
--- a/gateways/community/storagegrid/start_test.go
+++ b/gateways/server/storagegrid/start_test.go
@@ -19,12 +19,14 @@ package storagegrid
import (
"bytes"
"encoding/json"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
"io/ioutil"
"net/http"
"testing"
+
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/smartystreets/goconvey/convey"
)
var (
@@ -35,7 +37,7 @@ var (
"Records": [
{
"eventName": "ObjectCreated:Put",
- "eventSource": "sgws:s3",
+ "storageGridEventSource": "sgws:s3",
"eventTime": "2019-02-27T21:15:09Z",
"eventVersion": "2.0",
"requestParameters": {
@@ -71,40 +73,49 @@ var (
"Version": "2010-03-31"
}
`
- rc = &RouteConfig{
- route: gwcommon.GetFakeRoute(),
+ router = &Router{
+ route: webhook.GetFakeRoute(),
}
)
func TestRouteActiveHandler(t *testing.T) {
convey.Convey("Given a route configuration", t, func() {
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint] = &gwcommon.Endpoint{
- DataCh: make(chan []byte),
+ storageGridEventSource := &v1alpha1.StorageGridEventSource{
+ Webhook: &webhook.Context{
+ Endpoint: "/",
+ URL: "testurl",
+ Port: "8080",
+ },
+ Events: []string{
+ "ObjectCreated:Put",
+ },
+ Filter: &v1alpha1.StorageGridFilter{
+ Prefix: "hello-",
+ Suffix: ".txt",
+ },
}
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- writer := &gwcommon.FakeHttpWriter{}
+ writer := &webhook.FakeHttpWriter{}
convey.Convey("Inactive route should return error", func() {
- pbytes, err := yaml.Marshal(ps.(*storageGridEventSource))
+ pbytes, err := yaml.Marshal(storageGridEventSource)
convey.So(err, convey.ShouldBeNil)
- rc.RouteHandler(writer, &http.Request{
+ router.HandleRoute(writer, &http.Request{
Body: ioutil.NopCloser(bytes.NewReader(pbytes)),
})
convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest)
})
convey.Convey("Active route should return success", func() {
- helper.ActiveEndpoints[rc.route.Webhook.Endpoint].Active = true
- rc.sges = ps.(*storageGridEventSource)
+ router.route.Active = true
+ router.storageGridEventSource = storageGridEventSource
dataCh := make(chan []byte)
go func() {
- resp := <-helper.ActiveEndpoints[rc.route.Webhook.Endpoint].DataCh
+ resp := <-router.route.DataCh
dataCh <- resp
}()
- rc.RouteHandler(writer, &http.Request{
+ router.HandleRoute(writer, &http.Request{
Body: ioutil.NopCloser(bytes.NewReader([]byte(notification))),
})
convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusOK)
@@ -122,28 +133,52 @@ func TestGenerateUUID(t *testing.T) {
func TestFilterEvent(t *testing.T) {
convey.Convey("Given a storage grid event, test whether it passes the filter", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- var sg *storageGridNotification
- err = json.Unmarshal([]byte(notification), &sg)
+ storageGridEventSource := &v1alpha1.StorageGridEventSource{
+ Webhook: &webhook.Context{
+ Endpoint: "/",
+ URL: "testurl",
+ Port: "8080",
+ },
+ Events: []string{
+ "ObjectCreated:Put",
+ },
+ Filter: &v1alpha1.StorageGridFilter{
+ Prefix: "hello-",
+ Suffix: ".txt",
+ },
+ }
+ var gridNotification *storageGridNotification
+ err := json.Unmarshal([]byte(notification), &gridNotification)
convey.So(err, convey.ShouldBeNil)
- convey.So(sg, convey.ShouldNotBeNil)
+ convey.So(gridNotification, convey.ShouldNotBeNil)
- ok := filterEvent(sg, ps.(*storageGridEventSource))
+ ok := filterEvent(gridNotification, storageGridEventSource)
convey.So(ok, convey.ShouldEqual, true)
})
}
func TestFilterName(t *testing.T) {
convey.Convey("Given a storage grid event, test whether the object key passes the filter", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- var sg *storageGridNotification
- err = json.Unmarshal([]byte(notification), &sg)
+ storageGridEventSource := &v1alpha1.StorageGridEventSource{
+ Webhook: &webhook.Context{
+ Endpoint: "/",
+ URL: "testurl",
+ Port: "8080",
+ },
+ Events: []string{
+ "ObjectCreated:Put",
+ },
+ Filter: &v1alpha1.StorageGridFilter{
+ Prefix: "hello-",
+ Suffix: ".txt",
+ },
+ }
+ var gridNotification *storageGridNotification
+ err := json.Unmarshal([]byte(notification), &gridNotification)
convey.So(err, convey.ShouldBeNil)
- convey.So(sg, convey.ShouldNotBeNil)
+ convey.So(gridNotification, convey.ShouldNotBeNil)
- ok := filterName(sg, ps.(*storageGridEventSource))
+ ok := filterName(gridNotification, storageGridEventSource)
convey.So(ok, convey.ShouldEqual, true)
})
}
diff --git a/gateways/community/storagegrid/config.go b/gateways/server/storagegrid/types.go
similarity index 57%
rename from gateways/community/storagegrid/config.go
rename to gateways/server/storagegrid/types.go
index e856b29402..e7cd5a6eb2 100644
--- a/gateways/community/storagegrid/config.go
+++ b/gateways/server/storagegrid/types.go
@@ -17,49 +17,25 @@ limitations under the License.
package storagegrid
import (
- "github.com/sirupsen/logrus"
- "net/http"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
"time"
- gwcommon "github.com/argoproj/argo-events/gateways/common"
- "github.com/ghodss/yaml"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/sirupsen/logrus"
)
-const ArgoEventsEventSourceVersion = "v0.11"
-
-// StorageGridEventSourceExecutor implements Eventing
-type StorageGridEventSourceExecutor struct {
- Log *logrus.Logger
-}
-
-type RouteConfig struct {
- route *gwcommon.Route
- sges *storageGridEventSource
-}
-
-// storageGridEventSource contains configuration for storage grid sns
-type storageGridEventSource struct {
- // Webhook
- Hook *gwcommon.Webhook `json:"hook"`
-
- // Events are s3 bucket notification events.
- // For more information on s3 notifications, follow https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
- // Note that storage grid notifications do not contain `s3:`
- Events []string `json:"events,omitempty"`
-
- // Filter on object key which caused the notification.
- Filter *Filter `json:"filter,omitempty"`
-
- // srv holds reference to http server
- srv *http.Server
- mux *http.ServeMux
+// EventListener implements Eventing for storage grid events
+type EventListener struct {
+ // Logger is the logger for the event listener
+ Logger *logrus.Logger
}
-// Filter represents filters to apply to bucket notifications for specifying constraints on objects
-// +k8s:openapi-gen=true
-type Filter struct {
- Prefix string `json:"prefix"`
- Suffix string `json:"suffix"`
+// Router manages route
+type Router struct {
+ // route contains configuration of a REST endpoint
+ route *webhook.Route
+ // storageGridEventSource holds the configuration required to consume events from storage grid
+ storageGridEventSource *v1alpha1.StorageGridEventSource
}
// storageGridNotification is the bucket notification received from storage grid
@@ -68,7 +44,7 @@ type storageGridNotification struct {
Message struct {
Records []struct {
EventVersion string `json:"eventVersion"`
- EventSource string `json:"eventSource"`
+ EventSource string `json:"storageGridEventSource"`
EventTime time.Time `json:"eventTime"`
EventName string `json:"eventName"`
UserIdentity struct {
@@ -102,12 +78,3 @@ type storageGridNotification struct {
TopicArn string `json:"TopicArn"`
Version string `json:"Version"`
}
-
-func parseEventSource(eventSource string) (interface{}, error) {
- var s *storageGridEventSource
- err := yaml.Unmarshal([]byte(eventSource), &s)
- if err != nil {
- return nil, err
- }
- return s, err
-}
diff --git a/gateways/server/storagegrid/validate.go b/gateways/server/storagegrid/validate.go
new file mode 100644
index 0000000000..a1120f3500
--- /dev/null
+++ b/gateways/server/storagegrid/validate.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storagegrid
+
+import (
+ "context"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates storage grid event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.StorageGridEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.StorageGridEvent)),
+ }, nil
+ }
+
+ var storageGridEventSource *v1alpha1.StorageGridEventSource
+ if err := yaml.Unmarshal(eventSource.Value, &storageGridEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(storageGridEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to validate storage grid event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(eventSource *v1alpha1.StorageGridEventSource) error {
+ if eventSource == nil {
+ return common.ErrNilEventSource
+ }
+ return webhook.ValidateWebhookContext(eventSource.Webhook)
+}
diff --git a/gateways/server/storagegrid/validate_test.go b/gateways/server/storagegrid/validate_test.go
new file mode 100644
index 0000000000..6de5e82f6c
--- /dev/null
+++ b/gateways/server/storagegrid/validate_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storagegrid
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEventListener_ValidateEventSource(t *testing.T) {
+ listener := &EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ }
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "storagegrid",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("storagegrid"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "storage-grid.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.StorageGrid {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "storagegrid",
+ Value: content,
+ Type: "storagegrid",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/utils.go b/gateways/server/utils.go
similarity index 98%
rename from gateways/utils.go
rename to gateways/server/utils.go
index 2e70adea15..656482ab9a 100644
--- a/gateways/utils.go
+++ b/gateways/server/utils.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package gateways
+package server
import (
"github.com/argoproj/argo-events/common"
diff --git a/gateways/utils_test.go b/gateways/server/utils_test.go
similarity index 98%
rename from gateways/utils_test.go
rename to gateways/server/utils_test.go
index 926f26e162..2f59845c12 100644
--- a/gateways/utils_test.go
+++ b/gateways/server/utils_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package gateways
+package server
import (
"github.com/smartystreets/goconvey/convey"
diff --git a/gateways/core/webhook/Dockerfile b/gateways/server/webhook/Dockerfile
similarity index 100%
rename from gateways/core/webhook/Dockerfile
rename to gateways/server/webhook/Dockerfile
diff --git a/gateways/server/webhook/cmd/main.go b/gateways/server/webhook/cmd/main.go
new file mode 100644
index 0000000000..c949687750
--- /dev/null
+++ b/gateways/server/webhook/cmd/main.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/webhook"
+)
+
+func main() {
+ server.StartGateway(&webhook.EventListener{
+ Logger: common.NewArgoEventsLogger(),
+ })
+}
diff --git a/gateways/server/webhook/start.go b/gateways/server/webhook/start.go
new file mode 100644
index 0000000000..77d2e8e4f3
--- /dev/null
+++ b/gateways/server/webhook/start.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ "github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
+)
+
+// EventListener implements Eventing for webhook events
+type EventListener struct {
+ // Logger is the event listener's logger
+ Logger *logrus.Logger
+}
+
+// Router contains the configuration information for a route
+type Router struct {
+ // route contains information about an API endpoint
+ route *webhook.Route
+}
+
+// controller controls the webhook operations
+var (
+ controller = webhook.NewController()
+)
+
+// set up the activation and inactivation channels to control the state of routes.
+func init() {
+ go webhook.ProcessRouteStatus(controller)
+}
+
+// webhook event payload
+type payload struct {
+ // Header is the http request header
+ Header http.Header `json:"header"`
+ // Body is the HTTP request body
+ Body []byte `json:"body"`
+}
+
+// Implement Router
+// 1. GetRoute
+// 2. HandleRoute
+// 3. PostActivate
+// 4. PostInactivate
+
+// GetRoute returns the route
+func (router *Router) GetRoute() *webhook.Route {
+ return router.route
+}
+
+// HandleRoute handles incoming requests on the route
+func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) {
+ route := router.route
+
+ logger := route.Logger.WithFields(
+ map[string]interface{}{
+ common.LabelEventSource: route.EventSource.Name,
+ common.LabelEndpoint: route.Context.Endpoint,
+ common.LabelPort: route.Context.Port,
+ common.LabelHTTPMethod: route.Context.Method,
+ })
+
+ logger.Info("received a request, processing it...")
+
+ if !route.Active {
+ logger.Info("endpoint is not active, won't process the request")
+ common.SendErrorResponse(writer, "endpoint is inactive")
+ return
+ }
+
+ body, err := ioutil.ReadAll(request.Body)
+ if err != nil {
+ logger.WithError(err).Error("failed to parse request body")
+ common.SendErrorResponse(writer, err.Error())
+ return
+ }
+
+ data, err := json.Marshal(&payload{
+ Header: request.Header,
+ Body: body,
+ })
+ if err != nil {
+ logger.WithError(err).Error("failed to construct the event payload")
+ common.SendErrorResponse(writer, err.Error())
+ return
+ }
+
+ logger.Infoln("dispatching event on route's data channel...")
+ route.DataCh <- data
+ logger.Info("successfully processed the request")
+ common.SendSuccessResponse(writer, "success")
+}
+
+// PostActivate performs operations once the route is activated and ready to consume requests
+func (router *Router) PostActivate() error {
+ return nil
+}
+
+// PostInactivate performs operations after the route is inactivated
+func (router *Router) PostInactivate() error {
+ return nil
+}
+
+// StartEventSource starts an event source
+func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {
+ defer server.Recover(eventSource.Name)
+
+ log := listener.Logger.WithField(common.LabelEventSource, eventSource.Name)
+
+ log.Info("started operating on the event source...")
+
+ var webhookEventSource *webhook.Context
+ if err := yaml.Unmarshal(eventSource.Value, &webhookEventSource); err != nil {
+ log.WithError(err).Error("failed to parse the event source")
+ return err
+ }
+
+ route := webhook.NewRoute(webhookEventSource, listener.Logger, eventSource)
+
+ return webhook.ManageRoute(&Router{
+ route: route,
+ }, controller, eventStream)
+}
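
HandleRoute above wraps the incoming request's headers and raw body into the unexported `payload` struct, marshals it to JSON, and pushes the bytes onto the route's data channel. The standalone sketch below is not part of this change; `webhookPayload` and the sample bytes are illustrative assumptions that mirror the struct above, showing what a consumer of that channel would decode.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// webhookPayload mirrors the unexported payload struct in
// gateways/server/webhook/start.go (request headers plus raw body).
type webhookPayload struct {
	Header http.Header `json:"header"`
	Body   []byte      `json:"body"`
}

func main() {
	// Illustrative bytes as they might appear on route.DataCh; the body is
	// base64-encoded because encoding/json encodes []byte that way.
	data := []byte(`{"header":{"Content-Type":["application/json"]},"body":"eyJtc2ciOiJoaSJ9"}`)

	var p webhookPayload
	if err := json.Unmarshal(data, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Header.Get("Content-Type")) // application/json
	fmt.Println(string(p.Body))               // {"msg":"hi"}
}
```
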
diff --git a/gateways/server/webhook/validate.go b/gateways/server/webhook/validate.go
new file mode 100644
index 0000000000..51d946def6
--- /dev/null
+++ b/gateways/server/webhook/validate.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ "github.com/ghodss/yaml"
+)
+
+// ValidateEventSource validates webhook event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context, eventSource *gateways.EventSource) (*gateways.ValidEventSource, error) {
+ if apicommon.EventSourceType(eventSource.Type) != apicommon.WebhookEvent {
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: common.ErrEventSourceTypeMismatch(string(apicommon.WebhookEvent)),
+ }, nil
+ }
+
+ var webhookEventSource *webhook.Context
+ if err := yaml.Unmarshal(eventSource.Value, &webhookEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to parse the event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ if err := validate(webhookEventSource); err != nil {
+ listener.Logger.WithError(err).Error("failed to validate the webhook event source")
+ return &gateways.ValidEventSource{
+ IsValid: false,
+ Reason: err.Error(),
+ }, nil
+ }
+
+ return &gateways.ValidEventSource{
+ IsValid: true,
+ }, nil
+}
+
+func validate(webhookEventSource *webhook.Context) error {
+ if webhookEventSource == nil {
+ return common.ErrNilEventSource
+ }
+
+ switch webhookEventSource.Method {
+ case http.MethodHead, http.MethodPut, http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodTrace:
+ default:
+ return fmt.Errorf("unknown HTTP method %s", webhookEventSource.Method)
+ }
+
+ return webhook.ValidateWebhookContext(webhookEventSource)
+}
diff --git a/gateways/server/webhook/validate_test.go b/gateways/server/webhook/validate_test.go
new file mode 100644
index 0000000000..2e4d1f7512
--- /dev/null
+++ b/gateways/server/webhook/validate_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "context"
+ "fmt"
+ "github.com/stretchr/testify/assert"
+ "io/ioutil"
+ "testing"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways"
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/ghodss/yaml"
+)
+
+func TestValidateEventSource(t *testing.T) {
+ listener := &EventListener{}
+
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "webhook",
+ Value: nil,
+ Type: "sq",
+ })
+ assert.Equal(t, false, valid.IsValid)
+ assert.Equal(t, common.ErrEventSourceTypeMismatch("webhook"), valid.Reason)
+
+ content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", gateways.EventSourceDir, "webhook.yaml"))
+ assert.Nil(t, err)
+
+ var eventSource *v1alpha1.EventSource
+ err = yaml.Unmarshal(content, &eventSource)
+ assert.Nil(t, err)
+
+ for name, value := range eventSource.Spec.Webhook {
+ fmt.Println(name)
+ content, err := yaml.Marshal(value)
+ assert.Nil(t, err)
+ valid, _ := listener.ValidateEventSource(context.Background(), &gateways.EventSource{
+ Id: "1",
+ Name: "webhook",
+ Value: content,
+ Type: "webhook",
+ })
+ fmt.Println(valid.Reason)
+ assert.Equal(t, true, valid.IsValid)
+ }
+}
diff --git a/gateways/transformer.go b/gateways/transformer.go
deleted file mode 100644
index 55433938bc..0000000000
--- a/gateways/transformer.go
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gateways
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "net"
- "net/http"
- "time"
-
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
-
- "github.com/argoproj/argo-events/common"
- apicommon "github.com/argoproj/argo-events/pkg/apis/common"
- pc "github.com/argoproj/argo-events/pkg/apis/common"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "github.com/google/uuid"
-)
-
-// TransformerPayload contains payload of cloudevents.
-type TransformerPayload struct {
- // Src contains information about which specific configuration in gateway generated the event
- Src string `json:"src"`
- // Payload is event data
- Payload []byte `json:"payload"`
-}
-
-// DispatchEvent dispatches event to gateway transformer for further processing
-func (gc *GatewayConfig) DispatchEvent(gatewayEvent *Event) error {
- transformedEvent, err := gc.transformEvent(gatewayEvent)
- if err != nil {
- return err
- }
-
- payload, err := json.Marshal(transformedEvent)
- if err != nil {
- return fmt.Errorf("failed to dispatch event to watchers over http. marshalling failed. err: %+v", err)
- }
-
- switch gc.gw.Spec.EventProtocol.Type {
- case pc.HTTP:
- if err = gc.dispatchEventOverHttp(transformedEvent.Context.Source.Host, payload); err != nil {
- return err
- }
- case pc.NATS:
- if err = gc.dispatchEventOverNats(transformedEvent.Context.Source.Host, payload); err != nil {
- return err
- }
- default:
- return fmt.Errorf("unknown dispatch mechanism %s", gc.gw.Spec.EventProtocol.Type)
- }
- return nil
-}
-
-// transformEvent transforms an event from event source into a CloudEvents specification compliant event
-// See https://github.com/cloudevents/spec for more info.
-func (gc *GatewayConfig) transformEvent(gatewayEvent *Event) (*apicommon.Event, error) {
- // Generate an event id
- eventId := uuid.New()
-
- gc.Log.WithField(common.LabelEventSource, gatewayEvent.Name).Info("converting gateway event into cloudevents specification compliant event")
-
- // Create an CloudEvent
- ce := &apicommon.Event{
- Context: apicommon.EventContext{
- CloudEventsVersion: common.CloudEventsVersion,
- EventID: eventId.String(),
- ContentType: "application/json",
- EventTime: metav1.MicroTime{Time: time.Now().UTC()},
- EventType: gc.gw.Spec.Type,
- EventTypeVersion: v1alpha1.ArgoEventsGatewayVersion,
- Source: &apicommon.URI{
- Host: common.DefaultEventSourceName(gc.gw.Name, gatewayEvent.Name),
- },
- },
- Payload: gatewayEvent.Payload,
- }
-
- gc.Log.WithField(common.LabelGatewayName, gatewayEvent.Name).Info("event has been transformed into cloud event")
- return ce, nil
-}
-
-// dispatchEventOverHttp dispatches event to watchers over http.
-func (gc *GatewayConfig) dispatchEventOverHttp(source string, eventPayload []byte) error {
- gc.Log.WithField(common.LabelEventSource, source).Info("dispatching event to watchers")
-
- completeSuccess := true
-
- for _, sensor := range gc.gw.Spec.Watchers.Sensors {
- namespace := gc.Namespace
- if sensor.Namespace != "" {
- namespace = sensor.Namespace
- }
- if err := gc.postCloudEventToWatcher(common.ServiceDNSName(sensor.Name, namespace), gc.gw.Spec.EventProtocol.Http.Port, common.SensorServiceEndpoint, eventPayload); err != nil {
- gc.Log.WithField(common.LabelSensorName, sensor.Name).WithError(err).Warn("failed to dispatch event to sensor watcher over http. communication error")
- completeSuccess = false
- }
- }
- for _, gateway := range gc.gw.Spec.Watchers.Gateways {
- namespace := gc.Namespace
- if gateway.Namespace != "" {
- namespace = gateway.Namespace
- }
- if err := gc.postCloudEventToWatcher(common.ServiceDNSName(gateway.Name, namespace), gateway.Port, gateway.Endpoint, eventPayload); err != nil {
- gc.Log.WithField(common.LabelGatewayName, gateway.Name).WithError(err).Warn("failed to dispatch event to gateway watcher over http. communication error")
- completeSuccess = false
- }
- }
-
- response := "dispatched event to all watchers"
- if !completeSuccess {
- response = fmt.Sprintf("%s.%s", response, " although some of the dispatch operations failed, check logs for more info")
- }
-
- gc.Log.Info(response)
- return nil
-}
-
-// dispatchEventOverNats dispatches event over nats
-func (gc *GatewayConfig) dispatchEventOverNats(source string, eventPayload []byte) error {
- var err error
-
- switch gc.gw.Spec.EventProtocol.Nats.Type {
- case pc.Standard:
- err = gc.natsConn.Publish(source, eventPayload)
- case pc.Streaming:
- err = gc.natsStreamingConn.Publish(source, eventPayload)
- }
-
- if err != nil {
- gc.Log.WithField(common.LabelEventSource, source).WithError(err).Error("failed to publish event")
- return err
- }
-
- gc.Log.WithField(common.LabelEventSource, source).Info("event published successfully")
- return nil
-}
-
-// postCloudEventToWatcher makes a HTTP POST call to watcher's service
-func (gc *GatewayConfig) postCloudEventToWatcher(host string, port string, endpoint string, payload []byte) error {
- req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:%s%s", host, port, endpoint), bytes.NewBuffer(payload))
- if err != nil {
- return err
- }
- req.Header.Set("Content-Type", "application/json")
-
- client := &http.Client{
- Timeout: 20 * time.Second,
- Transport: &http.Transport{
- Dial: (&net.Dialer{
- KeepAlive: 600 * time.Second,
- }).Dial,
- MaxIdleConns: 100,
- MaxIdleConnsPerHost: 50,
- },
- }
- _, err = client.Do(req)
- return err
-}
diff --git a/hack/e2e/manifests/gateway-controller-deployment.yaml b/hack/e2e/manifests/gateway-controller-deployment.yaml
index bf41cf1acc..f712815efd 100755
--- a/hack/e2e/manifests/gateway-controller-deployment.yaml
+++ b/hack/e2e/manifests/gateway-controller-deployment.yaml
@@ -15,13 +15,13 @@ spec:
spec:
serviceAccountName: argo-events-sa
containers:
- - name: gateway-controller
- image: argoproj/gateway-controller:v0.11
- imagePullPolicy: Always
- env:
- - name: GATEWAY_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: GATEWAY_CONTROLLER_CONFIG_MAP
- value: gateway-controller-configmap
+ - name: gateway-controller
+ image: argoproj/gateway-controller:v0.10-test
+ imagePullPolicy: Always
+ env:
+ - name: GATEWAY_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: GATEWAY_CONTROLLER_CONFIG_MAP
+ value: gateway-controller-configmap
diff --git a/hack/e2e/manifests/sensor-controller-deployment.yaml b/hack/e2e/manifests/sensor-controller-deployment.yaml
index ea8b8af6d0..c47734b884 100755
--- a/hack/e2e/manifests/sensor-controller-deployment.yaml
+++ b/hack/e2e/manifests/sensor-controller-deployment.yaml
@@ -15,13 +15,13 @@ spec:
spec:
serviceAccountName: argo-events-sa
containers:
- - name: sensor-controller
- image: argoproj/sensor-controller:v0.11
- imagePullPolicy: Always
- env:
- - name: SENSOR_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: SENSOR_CONFIG_MAP
- value: sensor-controller-configmap
+ - name: sensor-controller
+ image: argoproj/sensor-controller:v0.10-test
+ imagePullPolicy: Always
+ env:
+ - name: SENSOR_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SENSOR_CONFIG_MAP
+ value: sensor-controller-configmap
diff --git a/hack/k8s/manifests/argo-events-cluster-roles.yaml b/hack/k8s/manifests/argo-events-cluster-roles.yaml
index d485ac3a7f..d562529df5 100644
--- a/hack/k8s/manifests/argo-events-cluster-roles.yaml
+++ b/hack/k8s/manifests/argo-events-cluster-roles.yaml
@@ -7,9 +7,9 @@ roleRef:
kind: ClusterRole
name: argo-events-role
subjects:
-- kind: ServiceAccount
- name: argo-events-sa
- namespace: argo-events
+ - kind: ServiceAccount
+ name: argo-events-sa
+ namespace: argo-events
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
@@ -44,10 +44,14 @@ rules:
resources:
- workflows
- workflows/finalizers
+ - workflowtemplates
+ - workflowtemplates/finalizers
- gateways
- gateways/finalizers
- sensors
- sensors/finalizers
+ - eventsources
+ - eventsources/finalizers
- apiGroups:
- ""
resources:
diff --git a/hack/k8s/manifests/argo-events-role.yaml b/hack/k8s/manifests/argo-events-role.yaml
new file mode 100644
index 0000000000..089c803bd7
--- /dev/null
+++ b/hack/k8s/manifests/argo-events-role.yaml
@@ -0,0 +1,82 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: argo-events-role-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: argo-events-role
+subjects:
+ - kind: ServiceAccount
+ name: argo-events-sa
+ namespace: argo-events
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: argo-events-role
+rules:
+ - apiGroups:
+ - argoproj.io
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ resources:
+ - workflows
+ - workflows/finalizers
+ - workflowtemplates
+ - workflowtemplates/finalizers
+ - gateways
+ - gateways/finalizers
+ - sensors
+ - sensors/finalizers
+ - eventsources
+ - eventsources/finalizers
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - pods/exec
+ - configmaps
+ - secrets
+ - services
+ - events
+ - persistentvolumeclaims
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - delete
+ - apiGroups:
+ - "batch"
+ resources:
+ - jobs
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - delete
+ - apiGroups:
+ - "apps"
+ resources:
+ - deployments
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - delete
diff --git a/hack/k8s/manifests/event-source-crd.yaml b/hack/k8s/manifests/event-source-crd.yaml
new file mode 100644
index 0000000000..5f79fc2d57
--- /dev/null
+++ b/hack/k8s/manifests/event-source-crd.yaml
@@ -0,0 +1,16 @@
+# Define an "event source" custom resource definition
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: eventsources.argoproj.io
+spec:
+ group: argoproj.io
+ scope: Namespaced
+ names:
+ kind: EventSource
+ plural: eventsources
+ singular: eventsource
+ listKind: EventSourceList
+ shortNames:
+ - es
+ version: "v1alpha1"
diff --git a/hack/k8s/manifests/gateway-controller-deployment.yaml b/hack/k8s/manifests/gateway-controller-deployment.yaml
index 0b4f37231c..e7d3b25d51 100644
--- a/hack/k8s/manifests/gateway-controller-deployment.yaml
+++ b/hack/k8s/manifests/gateway-controller-deployment.yaml
@@ -15,13 +15,13 @@ spec:
spec:
serviceAccountName: argo-events-sa
containers:
- - name: gateway-controller
- image: argoproj/gateway-controller
- imagePullPolicy: Always
- env:
- - name: GATEWAY_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: GATEWAY_CONTROLLER_CONFIG_MAP
- value: gateway-controller-configmap
+ - name: gateway-controller
+ image: argoproj/gateway-controller
+ imagePullPolicy: Always
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONTROLLER_CONFIG_MAP
+ value: gateway-controller-configmap
diff --git a/hack/k8s/manifests/gateway-crd.yaml b/hack/k8s/manifests/gateway-crd.yaml
index 6cab34ba89..7da5597b25 100644
--- a/hack/k8s/manifests/gateway-crd.yaml
+++ b/hack/k8s/manifests/gateway-crd.yaml
@@ -10,5 +10,7 @@ spec:
listKind: GatewayList
plural: gateways
singular: gateway
+ shortNames:
+ - gw
scope: Namespaced
- version: v1alpha1
+ version: "v1alpha1"
\ No newline at end of file
diff --git a/hack/k8s/manifests/installation.yaml b/hack/k8s/manifests/installation.yaml
new file mode 100644
index 0000000000..cd4bf0afce
--- /dev/null
+++ b/hack/k8s/manifests/installation.yaml
@@ -0,0 +1,217 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: gateways.argoproj.io
+spec:
+ group: argoproj.io
+ names:
+ kind: Gateway
+ listKind: GatewayList
+ plural: gateways
+ singular: gateway
+ shortNames:
+ - gw
+ scope: Namespaced
+ version: "v1alpha1"
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: sensors.argoproj.io
+spec:
+ group: argoproj.io
+ names:
+ kind: Sensor
+ listKind: SensorList
+ plural: sensors
+ singular: sensor
+ shortNames:
+ - sn
+ scope: Namespaced
+ version: "v1alpha1"
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: eventsources.argoproj.io
+spec:
+ group: argoproj.io
+ scope: Namespaced
+ names:
+ kind: EventSource
+ plural: eventsources
+ singular: eventsource
+ listKind: EventSourceList
+ shortNames:
+ - es
+ version: "v1alpha1"
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: argo-events
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: argo-events-sa
+ namespace: argo-events
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: argo-events-role-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: argo-events-role
+subjects:
+ - kind: ServiceAccount
+ name: argo-events-sa
+ namespace: argo-events
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: argo-events-role
+rules:
+ - apiGroups:
+ - argoproj.io
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ resources:
+ - workflows
+ - workflows/finalizers
+ - workflowtemplates
+ - workflowtemplates/finalizers
+ - gateways
+ - gateways/finalizers
+ - sensors
+ - sensors/finalizers
+ - eventsources
+ - eventsources/finalizers
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - pods/exec
+ - configmaps
+ - secrets
+ - services
+ - events
+ - persistentvolumeclaims
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - delete
+ - apiGroups:
+ - "batch"
+ resources:
+ - jobs
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - delete
+ - apiGroups:
+ - "apps"
+ resources:
+ - deployments
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - delete
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: gateway-controller-configmap
+data:
+ config: |
+ instanceID: argo-events
+ namespace: argo-events
+---
+# The gateway-controller listens for changes on the gateway CRD and creates gateway resources
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: gateway-controller
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: gateway-controller
+ template:
+ metadata:
+ labels:
+ app: gateway-controller
+ spec:
+ serviceAccountName: argo-events-sa
+ containers:
+ - name: gateway-controller
+ image: argoproj/gateway-controller:v0.12-test
+ imagePullPolicy: Always
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONTROLLER_CONFIG_MAP
+ value: gateway-controller-configmap
+---
+# The sensor-controller configmap includes configuration information for the sensor-controller
+# To watch sensors created in a namespace other than the one the controller is deployed in, remove the "namespace: argo-events" entry.
+# Similarly, to watch sensors created in one specific namespace only, set "namespace:" to that namespace.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sensor-controller-configmap
+data:
+ config: |
+ instanceID: argo-events
+ namespace: argo-events
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: sensor-controller
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: sensor-controller
+ template:
+ metadata:
+ labels:
+ app: sensor-controller
+ spec:
+ serviceAccountName: argo-events-sa
+ containers:
+ - name: sensor-controller
+ image: argoproj/sensor-controller:v0.12-test
+ imagePullPolicy: Always
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONTROLLER_CONFIG_MAP
+ value: sensor-controller-configmap
diff --git a/hack/k8s/manifests/sensor-controller-deployment.yaml b/hack/k8s/manifests/sensor-controller-deployment.yaml
index 43886f61e6..e22a03ab76 100644
--- a/hack/k8s/manifests/sensor-controller-deployment.yaml
+++ b/hack/k8s/manifests/sensor-controller-deployment.yaml
@@ -1,4 +1,3 @@
-# The sensor-controller listens for changes on the sensor CRD and creates sensor executor jobs
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -15,13 +14,13 @@ spec:
spec:
serviceAccountName: argo-events-sa
containers:
- - name: sensor-controller
- image: argoproj/sensor-controller
- imagePullPolicy: Always
- env:
- - name: SENSOR_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: SENSOR_CONFIG_MAP
- value: sensor-controller-configmap
+ - name: sensor-controller
+ image: argoproj/sensor-controller
+ imagePullPolicy: Always
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONTROLLER_CONFIG_MAP
+ value: sensor-controller-configmap
diff --git a/hack/k8s/manifests/sensor-crd.yaml b/hack/k8s/manifests/sensor-crd.yaml
index 5b50b24062..4808186ab5 100644
--- a/hack/k8s/manifests/sensor-crd.yaml
+++ b/hack/k8s/manifests/sensor-crd.yaml
@@ -10,5 +10,7 @@ spec:
listKind: SensorList
plural: sensors
singular: sensor
+ shortNames:
+ - sn
scope: Namespaced
- version: v1alpha1
+ version: "v1alpha1"
\ No newline at end of file
diff --git a/hack/k8s/manifests/workflow-crd.yaml b/hack/k8s/manifests/workflow-crd.yaml
deleted file mode 100644
index 230e6b1153..0000000000
--- a/hack/k8s/manifests/workflow-crd.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: "apiextensions.k8s.io/v1beta1"
-kind: "CustomResourceDefinition"
-metadata:
- name: "workflows.argoproj.io"
-spec:
- group: "argoproj.io"
- names:
- kind: "Workflow"
- plural: "workflows"
- shortNames: ["wf"]
- scope: "Namespaced"
- version: "v1alpha1"
\ No newline at end of file
diff --git a/hack/update-api-docs.sh b/hack/update-api-docs.sh
new file mode 100644
index 0000000000..351ad01106
--- /dev/null
+++ b/hack/update-api-docs.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# Setup at https://github.com/ahmetb/gen-crd-api-reference-docs
+
+# Event Source
+${GOPATH}/src/github.com/ahmetb/gen-crd-api-reference-docs/gen-crd-api-reference-docs \
+ -config "${GOPATH}/src/github.com/ahmetb/gen-crd-api-reference-docs/example-config.json" \
+ -api-dir "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1" \
+ -out-file "${GOPATH}/src/github.com/argoproj/argo-events/api/event-source.html" \
+ -template-dir "${GOPATH}/src/github.com/ahmetb/gen-crd-api-reference-docs/template"
+
+# Gateway
+${GOPATH}/src/github.com/ahmetb/gen-crd-api-reference-docs/gen-crd-api-reference-docs \
+ -config "${GOPATH}/src/github.com/ahmetb/gen-crd-api-reference-docs/example-config.json" \
+ -api-dir "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1" \
+ -out-file "${GOPATH}/src/github.com/argoproj/argo-events/api/gateway.html" \
+ -template-dir "${GOPATH}/src/github.com/ahmetb/gen-crd-api-reference-docs/template"
+
+# Sensor
+${GOPATH}/src/github.com/ahmetb/gen-crd-api-reference-docs/gen-crd-api-reference-docs \
+ -config "${GOPATH}/src/github.com/ahmetb/gen-crd-api-reference-docs/example-config.json" \
+ -api-dir "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" \
+ -out-file "${GOPATH}/src/github.com/argoproj/argo-events/api/sensor.html" \
+ -template-dir "${GOPATH}/src/github.com/ahmetb/gen-crd-api-reference-docs/template"
+
+# Setup at https://pandoc.org/installing.html
+
+pandoc --from markdown --to gfm ${GOPATH}/src/github.com/argoproj/argo-events/api/event-source.html > ${GOPATH}/src/github.com/argoproj/argo-events/api/event-source.md
+pandoc --from markdown --to gfm ${GOPATH}/src/github.com/argoproj/argo-events/api/gateway.html > ${GOPATH}/src/github.com/argoproj/argo-events/api/gateway.md
+pandoc --from markdown --to gfm ${GOPATH}/src/github.com/argoproj/argo-events/api/sensor.html > ${GOPATH}/src/github.com/argoproj/argo-events/api/sensor.md
diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh
index ca8c1ae002..0f9f6678d4 100755
--- a/hack/update-codegen.sh
+++ b/hack/update-codegen.sh
@@ -1,20 +1,5 @@
#!/bin/bash
-# Copyright 2017 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
set -o errexit
set -o nounset
set -o pipefail
@@ -32,4 +17,9 @@ bash -x ${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
"gateway:v1alpha1" \
--go-header-file $SCRIPT_ROOT/hack/custom-boilerplate.go.txt
+bash -x ${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
+ github.com/argoproj/argo-events/pkg/client/eventsources github.com/argoproj/argo-events/pkg/apis \
+ "eventsources:v1alpha1" \
+ --go-header-file $SCRIPT_ROOT/hack/custom-boilerplate.go.txt
+
go run $SCRIPT_ROOT/vendor/k8s.io/gengo/examples/deepcopy-gen/main.go -i github.com/argoproj/argo-events/pkg/apis/common -p github.com/argoproj/argo-events/pkg/apis/common --go-header-file $SCRIPT_ROOT/vendor/k8s.io/gengo/boilerplate/boilerplate.go.txt
diff --git a/hack/update-openapigen.sh b/hack/update-openapigen.sh
index 2103907052..92fc82f823 100755
--- a/hack/update-openapigen.sh
+++ b/hack/update-openapigen.sh
@@ -21,3 +21,10 @@ go run ${CODEGEN_PKG}/cmd/openapi-gen/openapi-gen.go \
--input-dirs github.com/argoproj/argo-events/pkg/apis/gateway/${VERSION} \
--output-package github.com/argoproj/argo-events/pkg/apis/gateway/${VERSION} \
$@
+
+# EventSource
+go run ${CODEGEN_PKG}/cmd/openapi-gen/openapi-gen.go \
+ --go-header-file ${PROJECT_ROOT}/hack/custom-boilerplate.go.txt \
+ --input-dirs github.com/argoproj/argo-events/pkg/apis/eventsources/${VERSION} \
+ --output-package github.com/argoproj/argo-events/pkg/apis/eventsources/${VERSION} \
+ $@
diff --git a/mkdocs.yml b/mkdocs.yml
index ce2f31bf4f..cbeceb667e 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -9,19 +9,19 @@ theme:
text: 'Work Sans'
logo: 'assets/logo.png'
google_analytics:
-- 'UA-105170809-2'
-- 'auto'
+ - 'UA-105170809-2'
+ - 'auto'
markdown_extensions:
-- codehilite
-- admonition
-- toc:
- permalink: true
+ - codehilite
+ - admonition
+ - toc:
+ permalink: true
nav:
- Overview: 'index.md'
- 'installation.md'
- 'getting_started.md'
- Setup:
- - gateways/artifact.md
+ - gateways/minio.md
- gateways/aws-sns.md
- gateways/aws-sqs.md
- gateways/calendar.md
diff --git a/pkg/apis/common/deepcopy_generated.go b/pkg/apis/common/deepcopy_generated.go
index 2f98673114..2438d5d7e0 100644
--- a/pkg/apis/common/deepcopy_generated.go
+++ b/pkg/apis/common/deepcopy_generated.go
@@ -203,24 +203,6 @@ func (in *S3Filter) DeepCopy() *S3Filter {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceTemplateSpec) DeepCopyInto(out *ServiceTemplateSpec) {
- *out = *in
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceTemplateSpec.
-func (in *ServiceTemplateSpec) DeepCopy() *ServiceTemplateSpec {
- if in == nil {
- return nil
- }
- out := new(ServiceTemplateSpec)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *URI) DeepCopyInto(out *URI) {
*out = *in
diff --git a/pkg/apis/common/event-sources.go b/pkg/apis/common/event-sources.go
new file mode 100644
index 0000000000..9e45d62aa9
--- /dev/null
+++ b/pkg/apis/common/event-sources.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// EventSourceType is the type of event source supported by the gateway
+type EventSourceType string
+
+// possible event source types
+var (
+ MinioEvent EventSourceType = "minio"
+ CalendarEvent EventSourceType = "calendar"
+ FileEvent EventSourceType = "file"
+ ResourceEvent EventSourceType = "resource"
+ WebhookEvent EventSourceType = "webhook"
+ AMQPEvent EventSourceType = "amqp"
+ KafkaEvent EventSourceType = "kafka"
+ MQTTEvent EventSourceType = "mqtt"
+ NATSEvent EventSourceType = "nats"
+ SNSEvent EventSourceType = "sns"
+ SQSEvent EventSourceType = "sqs"
+ PubSubEvent EventSourceType = "pubsub"
+ GitHubEvent EventSourceType = "github"
+ GitLabEvent EventSourceType = "gitlab"
+ HDFSEvent EventSourceType = "hdfs"
+ SlackEvent EventSourceType = "slack"
+ StorageGridEvent EventSourceType = "storagegrid"
+)
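
These constants are what the per-gateway `ValidateEventSource` implementations earlier in this change (for example the webhook and storage grid validators) compare the incoming `eventSource.Type` against via `apicommon.EventSourceType(eventSource.Type)`. Below is a standalone sketch of that comparison pattern; the local type and the two constants are mirrored here purely for illustration and are not part of this change.

```go
package main

import "fmt"

// EventSourceType mirrors pkg/apis/common.EventSourceType for illustration.
type EventSourceType string

var (
	WebhookEvent     EventSourceType = "webhook"
	StorageGridEvent EventSourceType = "storagegrid"
)

// isExpectedType is the same check the validators perform before
// unmarshalling the event source value.
func isExpectedType(got string, want EventSourceType) bool {
	return EventSourceType(got) == want
}

func main() {
	fmt.Println(isExpectedType("webhook", WebhookEvent)) // true
	fmt.Println(isExpectedType("sq", StorageGridEvent))  // false -> reported as a type mismatch
}
```
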
diff --git a/pkg/apis/common/event.go b/pkg/apis/common/event.go
index f40e86cec7..ef7af8fdfb 100644
--- a/pkg/apis/common/event.go
+++ b/pkg/apis/common/event.go
@@ -17,7 +17,6 @@ limitations under the License.
package common
import (
- corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -104,10 +103,8 @@ type URI struct {
// Dispatch protocol contains configuration necessary to dispatch an event to sensor over different communication protocols
type EventProtocol struct {
Type EventProtocolType `json:"type" protobuf:"bytes,1,opt,name=type"`
-
- Http Http `json:"http" protobuf:"bytes,2,opt,name=http"`
-
- Nats Nats `json:"nats" protobuf:"bytes,3,opt,name=nats"`
+ Http Http `json:"http" protobuf:"bytes,2,opt,name=http"`
+ Nats Nats `json:"nats" protobuf:"bytes,3,opt,name=nats"`
}
// Http contains the information required to setup a http server and listen to incoming events
@@ -148,16 +145,3 @@ type Nats struct {
// Type of the connection. either standard or streaming
Type NatsType `json:"type" protobuf:"bytes,10,opt,name=type"`
}
-
-// ServiceTemplateSpec is the template spec contains metadata and service spec.
-type ServiceTemplateSpec struct {
- // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
- // +optional
- metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
- // Specification of the desired behavior of the pod.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
- // +optional
- Spec corev1.ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
-}
diff --git a/pkg/apis/common/s3.go b/pkg/apis/common/s3.go
index c296745d63..58aa6674dd 100644
--- a/pkg/apis/common/s3.go
+++ b/pkg/apis/common/s3.go
@@ -20,7 +20,7 @@ import (
corev1 "k8s.io/api/core/v1"
)
-// S3Artifact contains information about an artifact in S3
+// S3Artifact contains information about an S3 connection and bucket
type S3Artifact struct {
Endpoint string `json:"endpoint" protobuf:"bytes,1,opt,name=endpoint"`
Bucket *S3Bucket `json:"bucket" protobuf:"bytes,2,opt,name=bucket"`
diff --git a/gateways/common/validate_test.go b/pkg/apis/eventsources/register.go
similarity index 68%
rename from gateways/common/validate_test.go
rename to pkg/apis/eventsources/register.go
index a944870c80..da5e971df9 100644
--- a/gateways/common/validate_test.go
+++ b/pkg/apis/eventsources/register.go
@@ -14,18 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package common
+package eventsources
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
-
-type fakeEventSource struct {
- Msg string `json:"msg"`
-}
+const (
+ // Group is the API Group
+ Group string = "argoproj.io"
-func TestValidateGatewayEventSource(t *testing.T) {
- convey.Convey("Given an event source, validate it", t, func() {
- })
-}
+ // EventSource constants
+ Kind string = "EventSource"
+ Singular string = "eventsource"
+ Plural string = "eventsources"
+ FullName string = Plural + "." + Group
+)
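
These register constants follow the usual Kubernetes API registration pattern: `FullName` concatenates the plural resource name with the API group, which is exactly the CRD `metadata.name` (`eventsources.argoproj.io`) used by the manifests added in this change. A tiny illustrative sketch of that composition (not part of this change):

```go
package main

import "fmt"

const (
	group    = "argoproj.io"
	plural   = "eventsources"
	fullName = plural + "." + group
)

func main() {
	// Matches metadata.name of the EventSource CRD manifest in this change.
	fmt.Println(fullName) // eventsources.argoproj.io
}
```
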
diff --git a/pkg/apis/eventsources/v1alpha1/doc.go b/pkg/apis/eventsources/v1alpha1/doc.go
new file mode 100644
index 0000000000..3556a921ff
--- /dev/null
+++ b/pkg/apis/eventsources/v1alpha1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 is the v1alpha1 version of the API.
+// +groupName=argoproj.io
+// +k8s:deepcopy-gen=package,register
+// +k8s:openapi-gen=true
+package v1alpha1
diff --git a/pkg/apis/eventsources/v1alpha1/openapi_generated.go b/pkg/apis/eventsources/v1alpha1/openapi_generated.go
new file mode 100644
index 0000000000..dde589f1c0
--- /dev/null
+++ b/pkg/apis/eventsources/v1alpha1/openapi_generated.go
@@ -0,0 +1,1391 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by openapi-gen. DO NOT EDIT.
+
+// This file was autogenerated by openapi-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+ spec "github.com/go-openapi/spec"
+ common "k8s.io/kube-openapi/pkg/common"
+)
+
+func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
+ return map[string]common.OpenAPIDefinition{
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.AMQPEventSource": schema_pkg_apis_eventsources_v1alpha1_AMQPEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.CalendarEventSource": schema_pkg_apis_eventsources_v1alpha1_CalendarEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.EventSource": schema_pkg_apis_eventsources_v1alpha1_EventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.EventSourceList": schema_pkg_apis_eventsources_v1alpha1_EventSourceList(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.EventSourceSpec": schema_pkg_apis_eventsources_v1alpha1_EventSourceSpec(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.EventSourceStatus": schema_pkg_apis_eventsources_v1alpha1_EventSourceStatus(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.FileEventSource": schema_pkg_apis_eventsources_v1alpha1_FileEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.GithubEventSource": schema_pkg_apis_eventsources_v1alpha1_GithubEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.GitlabEventSource": schema_pkg_apis_eventsources_v1alpha1_GitlabEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.HDFSEventSource": schema_pkg_apis_eventsources_v1alpha1_HDFSEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.KafkaEventSource": schema_pkg_apis_eventsources_v1alpha1_KafkaEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.MQTTEventSource": schema_pkg_apis_eventsources_v1alpha1_MQTTEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.NATSEventsSource": schema_pkg_apis_eventsources_v1alpha1_NATSEventsSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.PubSubEventSource": schema_pkg_apis_eventsources_v1alpha1_PubSubEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.ResourceEventSource": schema_pkg_apis_eventsources_v1alpha1_ResourceEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.ResourceFilter": schema_pkg_apis_eventsources_v1alpha1_ResourceFilter(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.SNSEventSource": schema_pkg_apis_eventsources_v1alpha1_SNSEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.SQSEventSource": schema_pkg_apis_eventsources_v1alpha1_SQSEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.SlackEventSource": schema_pkg_apis_eventsources_v1alpha1_SlackEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.StorageGridEventSource": schema_pkg_apis_eventsources_v1alpha1_StorageGridEventSource(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.StorageGridFilter": schema_pkg_apis_eventsources_v1alpha1_StorageGridFilter(ref),
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_AMQPEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "AMQPEventSource refers to an event-source for AMQP stream events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "url": {
+ SchemaProps: spec.SchemaProps{
+ Description: "URL for rabbitmq service",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "exchangeName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ExchangeName is the exchange name. For more information, visit https://www.rabbitmq.com/tutorials/amqp-concepts.html",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "exchangeType": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ExchangeType is rabbitmq exchange type",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "routingKey": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Routing key for bindings",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "connectionBackoff": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Backoff holds parameters applied to connection.",
+ Ref: ref("github.com/argoproj/argo-events/common.Backoff"),
+ },
+ },
+ },
+ Required: []string{"url", "exchangeName", "exchangeType", "routingKey"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/common.Backoff"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_CalendarEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "CalendarEventSource describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed. Schedule takes precedence over interval; interval takes precedence over recurrence",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "schedule": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "interval": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h...",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "exclusionDates": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-type": "string",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "ExclusionDates defines the list of DATE-TIME exceptions for recurring events.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "timezone": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Timezone in which to run the schedule",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "userPayload": {
+ SchemaProps: spec.SchemaProps{
+ Description: "UserPayload will be sent to sensor as extra data once the event is triggered",
+ Type: []string{"string"},
+ Format: "byte",
+ },
+ },
+ },
+ Required: []string{"schedule", "interval"},
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_EventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "EventSource is the definition of an eventsource resource",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
+ },
+ },
+ "status": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.EventSourceStatus"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.EventSourceSpec"),
+ },
+ },
+ },
+ Required: []string{"metadata", "status", "spec"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.EventSourceSpec", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.EventSourceStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_EventSourceList(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "EventSourceList is the list of eventsource resources",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
+ },
+ },
+ "items": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-type": "eventsource",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.EventSource"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"metadata", "items"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.EventSource", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_EventSourceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "EventSourceSpec refers to specification of event-source resource",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "minio": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Minio event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.S3Artifact"),
+ },
+ },
+ },
+ },
+ },
+ "calendar": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Calendar event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.CalendarEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "file": {
+ SchemaProps: spec.SchemaProps{
+ Description: "File event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.FileEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "resource": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Resource event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.ResourceEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "webhook": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Webhook event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/gateways/server/common/webhook.Context"),
+ },
+ },
+ },
+ },
+ },
+ "amqp": {
+ SchemaProps: spec.SchemaProps{
+ Description: "AMQP event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.AMQPEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "kafka": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kafka event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.KafkaEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "mqtt": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MQTT event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.MQTTEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "nats": {
+ SchemaProps: spec.SchemaProps{
+ Description: "NATS event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.NATSEventsSource"),
+ },
+ },
+ },
+ },
+ },
+ "sns": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SNS event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.SNSEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "sqs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SQS event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.SQSEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "pubSub": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PubSub eevnt sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.PubSubEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "github": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Github event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.GithubEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "gitlab": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Gitlab event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.GitlabEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "hdfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "HDFS event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.HDFSEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "slack": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Slack event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.SlackEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "storageGrid": {
+ SchemaProps: spec.SchemaProps{
+ Description: "StorageGrid event sources",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.StorageGridEventSource"),
+ },
+ },
+ },
+ },
+ },
+ "type": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Type of the event source",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"type"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/gateways/server/common/webhook.Context", "github.com/argoproj/argo-events/pkg/apis/common.S3Artifact", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.AMQPEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.CalendarEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.FileEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.GithubEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.GitlabEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.HDFSEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.KafkaEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.MQTTEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.NATSEventsSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.PubSubEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.ResourceEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.SNSEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.SQSEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.SlackEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.StorageGridEventSource"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_EventSourceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "EventSourceStatus holds the status of the event-source resource",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "createdAt": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_FileEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "FileEventSource describes an event-source for file related events.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "eventType": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Type of file operations to watch Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "watchPathConfig": {
+ SchemaProps: spec.SchemaProps{
+ Description: "WatchPathConfig contains configuration about the file path to watch",
+ Ref: ref("github.com/argoproj/argo-events/gateways/server/common/fsevent.WatchPathConfig"),
+ },
+ },
+ },
+ Required: []string{"eventType", "watchPathConfig"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/gateways/server/common/fsevent.WatchPathConfig"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_GithubEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "GithubEventSource refers to event-source for github related events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "id": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Id is the webhook's id",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "webhook": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Webhook refers to the configuration required to run a http server",
+ Ref: ref("github.com/argoproj/argo-events/gateways/server/common/webhook.Context"),
+ },
+ },
+ "owner": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Owner refers to GitHub owner name i.e. argoproj",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "repository": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Repository refers to GitHub repo name i.e. argo-events",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "events": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-type": "string",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Events refer to Github events to subscribe to which the gateway will subscribe",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "apiToken": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIToken refers to a K8s secret containing github api token",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "webhookSecret": {
+ SchemaProps: spec.SchemaProps{
+ Description: "WebhookSecret refers to K8s secret containing GitHub webhook secret https://developer.github.com/webhooks/securing/",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "insecure": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Insecure tls verification",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "active": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Active refers to status of the webhook for event deliveries. https://developer.github.com/webhooks/creating/#active",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "contentType": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ContentType of the event delivery",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "githubBaseURL": {
+ SchemaProps: spec.SchemaProps{
+ Description: "GitHub base URL (for GitHub Enterprise)",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "githubUploadURL": {
+ SchemaProps: spec.SchemaProps{
+ Description: "GitHub upload URL (for GitHub Enterprise)",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace refers to Kubernetes namespace which is used to retrieve webhook secret and api token from.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "deleteHookOnFinish": {
+ SchemaProps: spec.SchemaProps{
+ Description: "DeleteHookOnFinish determines whether to delete the GitHub hook for the repository once the event source is stopped.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"id", "webhook", "owner", "repository", "events", "apiToken", "namespace"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/gateways/server/common/webhook.Context", "k8s.io/api/core/v1.SecretKeySelector"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_GitlabEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "GitlabEventSource refers to event-source related to Gitlab events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "webhook": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Webhook holds configuration to run a http server",
+ Ref: ref("github.com/argoproj/argo-events/gateways/server/common/webhook.Context"),
+ },
+ },
+ "projectId": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ProjectId is the id of project for which integration needs to setup",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "event": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Event is a gitlab event to listen to. Refer https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "accessToken": {
+ SchemaProps: spec.SchemaProps{
+ Description: "AccessToken is reference to k8 secret which holds the gitlab api access information",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "enableSSLVerification": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EnableSSLVerification to enable ssl verification",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "gitlabBaseURL": {
+ SchemaProps: spec.SchemaProps{
+ Description: "GitlabBaseURL is the base URL for API requests to a custom endpoint",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace refers to Kubernetes namespace which is used to retrieve access token from.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "deleteHookOnFinish": {
+ SchemaProps: spec.SchemaProps{
+ Description: "DeleteHookOnFinish determines whether to delete the GitLab hook for the project once the event source is stopped.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"webhook", "projectId", "event", "accessToken", "gitlabBaseURL", "namespace"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/gateways/server/common/webhook.Context", "k8s.io/api/core/v1.SecretKeySelector"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_HDFSEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "HDFSEventSource refers to event-source for HDFS related events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "directory": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Directory to watch for events",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "path": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Path is relative path of object to watch with respect to the directory",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "pathRegexp": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PathRegexp is regexp of relative path of object to watch with respect to the directory",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "type": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Type of file operations to watch",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "checkInterval": {
+ SchemaProps: spec.SchemaProps{
+ Description: "CheckInterval is a string that describes an interval duration to check the directory state, e.g. 1s, 30m, 2h... (defaults to 1m)",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "addresses": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-type": "string",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Addresses is accessible addresses of HDFS name nodes",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "hdfsUser": {
+ SchemaProps: spec.SchemaProps{
+ Description: "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "krbCCacheSecret": {
+ SchemaProps: spec.SchemaProps{
+ Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "krbKeytabSecret": {
+ SchemaProps: spec.SchemaProps{
+ Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "krbUsername": {
+ SchemaProps: spec.SchemaProps{
+ Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "krbRealm": {
+ SchemaProps: spec.SchemaProps{
+ Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "krbConfigConfigMap": {
+ SchemaProps: spec.SchemaProps{
+ Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.",
+ Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"),
+ },
+ },
+ "krbServicePrincipalName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace refers to Kubernetes namespace which is used to retrieve cache secret and ket tab secret from.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"directory", "type", "addresses", "namespace"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_KafkaEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "KafkaEventSource refers to event-source for Kafka related events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "url": {
+ SchemaProps: spec.SchemaProps{
+ Description: "URL to kafka cluster",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "partition": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Partition name",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "topic": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Topic name",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "connectionBackoff": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Backoff holds parameters applied to connection.",
+ Ref: ref("github.com/argoproj/argo-events/common.Backoff"),
+ },
+ },
+ },
+ Required: []string{"url", "partition", "topic"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/common.Backoff"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_MQTTEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "MQTTEventSource refers to event-source for MQTT related events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "url": {
+ SchemaProps: spec.SchemaProps{
+ Description: "URL to connect to broker",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "topic": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Topic name",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "clientId": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ClientID is the id of the client",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "connectionBackoff": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ConnectionBackoff holds backoff applied to connection.",
+ Ref: ref("github.com/argoproj/argo-events/common.Backoff"),
+ },
+ },
+ },
+ Required: []string{"url", "topic", "clientId"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/common.Backoff"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_NATSEventsSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "NATSEventSource refers to event-source for NATS related events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "url": {
+ SchemaProps: spec.SchemaProps{
+ Description: "URL to connect to NATS cluster",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "subject": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Subject holds the name of the subject onto which messages are published",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "connectionBackoff": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ConnectionBackoff holds backoff applied to connection.",
+ Ref: ref("github.com/argoproj/argo-events/common.Backoff"),
+ },
+ },
+ },
+ Required: []string{"url", "subject"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/common.Backoff"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_PubSubEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "PubSubEventSource refers to event-source for GCP PubSub related events.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "projectID": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ProjectID is the unique identifier for your project on GCP",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "topicProjectID": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TopicProjectID identifies the project where the topic should exist or be created (assumed to be the same as ProjectID by default)",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "topic": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Topic on which a subscription will be created",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "credentialsFile": {
+ SchemaProps: spec.SchemaProps{
+ Description: "CredentialsFile is the file that contains credentials to authenticate for GCP",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "deleteSubscriptionOnFinish": {
+ SchemaProps: spec.SchemaProps{
+ Description: "DeleteSubscriptionOnFinish determines whether to delete the GCP PubSub subscription once the event source is stopped.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"projectID", "topicProjectID", "topic", "credentialsFile"},
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_ResourceEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ResourceEventSource refers to a event-source for K8s resource related events.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace where resource is deployed",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "filter": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Filter is applied on the metadata of the resource",
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.ResourceFilter"),
+ },
+ },
+ "group": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "version": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "resource": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "eventType": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Type is the event type. If not provided, the gateway will watch all events for a resource.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"namespace", "group", "version", "resource"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.ResourceFilter"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_ResourceFilter(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ResourceFilter contains K8 ObjectMeta information to further filter resource event objects",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "prefix": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "labels": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "fields": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "createdBy": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_SNSEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "SNSEventSource refers to event-source for AWS SNS related events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "webhook": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Webhook configuration for http server",
+ Ref: ref("github.com/argoproj/argo-events/gateways/server/common/webhook.Context"),
+ },
+ },
+ "topicArn": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TopicArn",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "accessKey": {
+ SchemaProps: spec.SchemaProps{
+ Description: "AccessKey refers K8 secret containing aws access key",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "secretKey": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SecretKey refers K8 secret containing aws secret key",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace refers to Kubernetes namespace to read access related secret from.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "region": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Region is AWS region",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"webhook", "topicArn", "region"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/gateways/server/common/webhook.Context", "k8s.io/api/core/v1.SecretKeySelector"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_SQSEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "SQSEventSource refers to event-source for AWS SQS related events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "accessKey": {
+ SchemaProps: spec.SchemaProps{
+ Description: "AccessKey refers K8 secret containing aws access key",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "secretKey": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SecretKey refers K8 secret containing aws secret key",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "region": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Region is AWS region",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "queue": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Queue is AWS SQS queue to listen to for messages",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "waitTimeSeconds": {
+ SchemaProps: spec.SchemaProps{
+ Description: "WaitTimeSeconds is The duration (in seconds) for which the call waits for a message to arrive in the queue before returning.",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace refers to Kubernetes namespace to read access related secret from.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"region", "queue", "waitTimeSeconds"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.SecretKeySelector"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_SlackEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "SlackEventSource refers to event-source for Slack related events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "signingSecret": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Slack App signing secret",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "token": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Token for URL verification handshake",
+ Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+ },
+ },
+ "webhook": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Webhook holds configuration for a REST endpoint",
+ Ref: ref("github.com/argoproj/argo-events/gateways/server/common/webhook.Context"),
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace refers to Kubernetes namespace which is used to retrieve token and signing secret from.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"webhook", "namespace"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/gateways/server/common/webhook.Context", "k8s.io/api/core/v1.SecretKeySelector"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_StorageGridEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "StorageGridEventSource refers to event-source for StorageGrid related events",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "webhook": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Webhook holds configuration for a REST endpoint",
+ Ref: ref("github.com/argoproj/argo-events/gateways/server/common/webhook.Context"),
+ },
+ },
+ "events": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-type": "string",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Events are s3 bucket notification events. For more information on s3 notifications, follow https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations Note that storage grid notifications do not contain `s3:`",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "filter": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Filter on object key which caused the notification.",
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.StorageGridFilter"),
+ },
+ },
+ },
+ Required: []string{"webhook"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/gateways/server/common/webhook.Context", "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1.StorageGridFilter"},
+ }
+}
+
+func schema_pkg_apis_eventsources_v1alpha1_StorageGridFilter(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Filter represents filters to apply to bucket notifications for specifying constraints on objects",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "prefix": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "suffix": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"prefix", "suffix"},
+ },
+ },
+ }
+}
diff --git a/pkg/apis/eventsources/v1alpha1/register.go b/pkg/apis/eventsources/v1alpha1/register.go
new file mode 100644
index 0000000000..8eebe9a21a
--- /dev/null
+++ b/pkg/apis/eventsources/v1alpha1/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v1alpha1
+
+import (
+ event_sources "github.com/argoproj/argo-events/pkg/apis/eventsources"
+ "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is a group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: event_sources.Group, Version: "v1alpha1"}
+
+// SchemaGroupVersionKind is a group version kind used to attach owner references to the gateway-controller
+var SchemaGroupVersionKind = schema.GroupVersionKind{Group: event_sources.Group, Version: "v1alpha1", Kind: event_sources.Kind}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes unqualified resource and returns Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is the builder for this scheme
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+
+ // AddToScheme adds the types in this group-version to the given scheme
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &EventSource{},
+ &EventSourceList{},
+ )
+ v1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
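+
+// Illustrative sketch only (not part of the generated registration code): callers
+// typically wire AddToScheme into a runtime.Scheme at startup before handling
+// EventSource objects via client-go machinery, e.g.
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddToScheme(scheme); err != nil {
+//		panic(err)
+//	}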
diff --git a/pkg/apis/eventsources/v1alpha1/types.go b/pkg/apis/eventsources/v1alpha1/types.go
new file mode 100644
index 0000000000..7800046f27
--- /dev/null
+++ b/pkg/apis/eventsources/v1alpha1/types.go
@@ -0,0 +1,387 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package v1alpha1
+
+import (
+ "encoding/json"
+
+ "github.com/argoproj/argo-events/common"
+ "github.com/argoproj/argo-events/gateways/server/common/fsevent"
+ "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EventSource is the definition of an eventsource resource
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=true
+type EventSource struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+ Status EventSourceStatus `json:"status" protobuf:"bytes,2,opt,name=status"`
+ Spec *EventSourceSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"`
+}
+
+// EventSourceList is the list of eventsource resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type EventSourceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+ // +listType=eventsource
+ Items []EventSource `json:"items" protobuf:"bytes,2,opt,name=items"`
+}
+
+// EventSourceSpec refers to specification of event-source resource
+type EventSourceSpec struct {
+ // Minio event sources
+ Minio map[string]apicommon.S3Artifact `json:"minio,omitempty" protobuf:"bytes,1,opt,name=minio"`
+ // Calendar event sources
+ Calendar map[string]CalendarEventSource `json:"calendar,omitempty" protobuf:"bytes,2,opt,name=calendar"`
+ // File event sources
+ File map[string]FileEventSource `json:"file,omitempty" protobuf:"bytes,3,opt,name=file"`
+ // Resource event sources
+ Resource map[string]ResourceEventSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"`
+ // Webhook event sources
+ Webhook map[string]webhook.Context `json:"webhook,omitempty" protobuf:"bytes,5,opt,name=webhook"`
+ // AMQP event sources
+ AMQP map[string]AMQPEventSource `json:"amqp,omitempty" protobuf:"bytes,6,opt,name=amqp"`
+ // Kafka event sources
+ Kafka map[string]KafkaEventSource `json:"kafka,omitempty" protobuf:"bytes,7,opt,name=kafka"`
+ // MQTT event sources
+ MQTT map[string]MQTTEventSource `json:"mqtt,omitempty" protobuf:"bytes,8,opt,name=mqtt"`
+ // NATS event sources
+ NATS map[string]NATSEventsSource `json:"nats,omitempty" protobuf:"bytes,9,opt,name=nats"`
+ // SNS event sources
+ SNS map[string]SNSEventSource `json:"sns,omitempty" protobuf:"bytes,10,opt,name=sns"`
+ // SQS event sources
+ SQS map[string]SQSEventSource `json:"sqs,omitempty" protobuf:"bytes,11,opt,name=sqs"`
+ // PubSub event sources
+ PubSub map[string]PubSubEventSource `json:"pubSub,omitempty" protobuf:"bytes,12,opt,name=pubSub"`
+ // Github event sources
+ Github map[string]GithubEventSource `json:"github,omitempty" protobuf:"bytes,13,opt,name=github"`
+ // Gitlab event sources
+ Gitlab map[string]GitlabEventSource `json:"gitlab,omitempty" protobuf:"bytes,14,opt,name=gitlab"`
+ // HDFS event sources
+ HDFS map[string]HDFSEventSource `json:"hdfs,omitempty" protobuf:"bytes,15,opt,name=hdfs"`
+ // Slack event sources
+ Slack map[string]SlackEventSource `json:"slack,omitempty" protobuf:"bytes,16,opt,name=slack"`
+ // StorageGrid event sources
+ StorageGrid map[string]StorageGridEventSource `json:"storageGrid,omitempty" protobuf:"bytes,17,opt,name=storageGrid"`
+ // Type of the event source
+ Type apicommon.EventSourceType `json:"type" protobuf:"bytes,19,name=type"`
+}
+
+// CalendarEventSource describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed.
+// Schedule takes precedence over interval; interval takes precedence over recurrence
+type CalendarEventSource struct {
+ // Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron
+ Schedule string `json:"schedule" protobuf:"bytes,1,name=schedule"`
+ // Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h...
+ Interval string `json:"interval" protobuf:"bytes,2,name=interval"`
+ // ExclusionDates defines the list of DATE-TIME exceptions for recurring events.
+ // +listType=string
+ ExclusionDates []string `json:"exclusionDates,omitempty" protobuf:"bytes,3,opt,name=exclusionDates"`
+ // Timezone in which to run the schedule
+ // +optional
+ Timezone string `json:"timezone,omitempty" protobuf:"bytes,4,opt,name=timezone"`
+ // UserPayload will be sent to sensor as extra data once the event is triggered
+ // +optional
+ UserPayload *json.RawMessage `json:"userPayload,omitempty" protobuf:"bytes,5,opt,name=userPayload"`
+}
+
+// FileEventSource describes an event-source for file related events.
+type FileEventSource struct {
+ // Type of file operations to watch
+ // Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information
+ EventType string `json:"eventType" protobuf:"bytes,1,name=eventType"`
+ // WatchPathConfig contains configuration about the file path to watch
+ WatchPathConfig fsevent.WatchPathConfig `json:"watchPathConfig" protobuf:"bytes,2,name=watchPathConfig"`
+}
+
+// ResourceEventType is the type of event for the K8s resource mutation
+type ResourceEventType string
+
+// possible values of ResourceEventType
+const (
+ ADD ResourceEventType = "ADD"
+ UPDATE ResourceEventType = "UPDATE"
+ DELETE ResourceEventType = "DELETE"
+)
+
+// ResourceEventSource refers to an event-source for K8s resource related events.
+type ResourceEventSource struct {
+ // Namespace where resource is deployed
+ Namespace string `json:"namespace" protobuf:"bytes,1,name=namespace"`
+ // Filter is applied on the metadata of the resource
+ // +optional
+ Filter *ResourceFilter `json:"filter,omitempty" protobuf:"bytes,2,opt,name=filter"`
+ // Group of the resource
+ metav1.GroupVersionResource `json:",inline"`
+ // Type is the event type.
+ // If not provided, the gateway will watch all events for a resource.
+ // +optional
+ EventType ResourceEventType `json:"eventType,omitempty" protobuf:"bytes,3,opt,name=eventType"`
+}
+
+// ResourceFilter contains K8 ObjectMeta information to further filter resource event objects
+type ResourceFilter struct {
+ // +optional
+ Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
+ // +optional
+ Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,2,opt,name=labels"`
+ // +optional
+ Fields map[string]string `json:"fields,omitempty" protobuf:"bytes,3,opt,name=fields"`
+ // +optional
+ CreatedBy metav1.Time `json:"createdBy,omitempty" protobuf:"bytes,4,opt,name=createdBy"`
+}
+
+// AMQPEventSource refers to an event-source for AMQP stream events
+type AMQPEventSource struct {
+ // URL for rabbitmq service
+ URL string `json:"url" protobuf:"bytes,1,name=url"`
+ // ExchangeName is the exchange name
+ // For more information, visit https://www.rabbitmq.com/tutorials/amqp-concepts.html
+ ExchangeName string `json:"exchangeName" protobuf:"bytes,2,name=exchangeName"`
+ // ExchangeType is rabbitmq exchange type
+ ExchangeType string `json:"exchangeType" protobuf:"bytes,3,name=exchangeType"`
+ // Routing key for bindings
+ RoutingKey string `json:"routingKey" protobuf:"bytes,4,name=routingKey"`
+ // Backoff holds parameters applied to connection.
+ // +optional
+ ConnectionBackoff *common.Backoff `json:"connectionBackoff,omitempty" protobuf:"bytes,5,opt,name=connectionBackoff"`
+}
+
+// KafkaEventSource refers to event-source for Kafka related events
+type KafkaEventSource struct {
+ // URL to kafka cluster
+ URL string `json:"url" protobuf:"bytes,1,name=url"`
+ // Partition name
+ Partition string `json:"partition" protobuf:"bytes,2,name=partition"`
+ // Topic name
+ Topic string `json:"topic" protobuf:"bytes,3,name=topic"`
+ // Backoff holds parameters applied to connection.
+ ConnectionBackoff *common.Backoff `json:"connectionBackoff,omitempty" protobuf:"bytes,4,opt,name=connectionBackoff"`
+}
+
+// MQTTEventSource refers to event-source for MQTT related events
+type MQTTEventSource struct {
+ // URL to connect to broker
+ URL string `json:"url" protobuf:"bytes,1,name=url"`
+ // Topic name
+ Topic string `json:"topic" protobuf:"bytes,2,name=topic"`
+ // ClientID is the id of the client
+ ClientId string `json:"clientId" protobuf:"bytes,3,name=clientId"`
+ // ConnectionBackoff holds backoff applied to connection.
+ ConnectionBackoff *common.Backoff `json:"connectionBackoff,omitempty" protobuf:"bytes,4,opt,name=connectionBackoff"`
+}
+
+// NATSEventsSource refers to event-source for NATS related events
+type NATSEventsSource struct {
+ // URL to connect to NATS cluster
+ URL string `json:"url" protobuf:"bytes,1,name=url"`
+ // Subject holds the name of the subject onto which messages are published
+ Subject string `json:"subject" protobuf:"bytes,2,name=subject"`
+ // ConnectionBackoff holds backoff applied to connection.
+ ConnectionBackoff *common.Backoff `json:"connectionBackoff,omitempty" protobuf:"bytes,3,opt,name=connectionBackoff"`
+}
+
+// SNSEventSource refers to event-source for AWS SNS related events
+type SNSEventSource struct {
+ // Webhook configuration for http server
+ Webhook *webhook.Context `json:"webhook" protobuf:"bytes,1,name=webhook"`
+ // TopicArn
+ TopicArn string `json:"topicArn" protobuf:"bytes,2,name=topicArn"`
+ // AccessKey refers to a K8s secret containing the aws access key
+ AccessKey *corev1.SecretKeySelector `json:"accessKey,omitempty" protobuf:"bytes,3,opt,name=accessKey"`
+ // SecretKey refers to a K8s secret containing the aws secret key
+ SecretKey *corev1.SecretKeySelector `json:"secretKey,omitempty" protobuf:"bytes,4,opt,name=secretKey"`
+ // Namespace refers to Kubernetes namespace to read access related secret from.
+ // +optional
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,5,opt,name=namespace"`
+ // Region is AWS region
+ Region string `json:"region" protobuf:"bytes,6,name=region"`
+}
+
+// SQSEventSource refers to event-source for AWS SQS related events
+type SQSEventSource struct {
+ // AccessKey refers to a K8s secret containing the aws access key
+ AccessKey *corev1.SecretKeySelector `json:"accessKey,omitempty" protobuf:"bytes,1,opt,name=accessKey"`
+ // SecretKey refers to a K8s secret containing the aws secret key
+ SecretKey *corev1.SecretKeySelector `json:"secretKey,omitempty" protobuf:"bytes,2,opt,name=secretKey"`
+ // Region is AWS region
+ Region string `json:"region" protobuf:"bytes,3,name=region"`
+ // Queue is AWS SQS queue to listen to for messages
+ Queue string `json:"queue" protobuf:"bytes,4,name=queue"`
+ // WaitTimeSeconds is the duration (in seconds) for which the call waits for a message to arrive
+ // in the queue before returning.
+ WaitTimeSeconds int64 `json:"waitTimeSeconds" protobuf:"bytes,5,name=waitTimeSeconds"`
+ // Namespace refers to Kubernetes namespace to read access related secret from.
+ // +optional
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
+}
+
+// PubSubEventSource refers to event-source for GCP PubSub related events.
+type PubSubEventSource struct {
+ // ProjectID is the unique identifier for your project on GCP
+ ProjectID string `json:"projectID" protobuf:"bytes,1,name=projectID"`
+ // TopicProjectID identifies the project where the topic should exist or be created
+ // (assumed to be the same as ProjectID by default)
+ TopicProjectID string `json:"topicProjectID" protobuf:"bytes,2,name=topicProjectID"`
+ // Topic on which a subscription will be created
+ Topic string `json:"topic" protobuf:"bytes,3,name=topic"`
+ // CredentialsFile is the file that contains credentials to authenticate for GCP
+ CredentialsFile string `json:"credentialsFile" protobuf:"bytes,4,name=credentialsFile"`
+ // DeleteSubscriptionOnFinish determines whether to delete the GCP PubSub subscription once the event source is stopped.
+ // +optional
+ DeleteSubscriptionOnFinish bool `json:"deleteSubscriptionOnFinish,omitempty" protobuf:"bytes,5,opt,name=deleteSubscriptionOnFinish"`
+}
+
+// GithubEventSource refers to event-source for github related events
+type GithubEventSource struct {
+ // Id is the webhook's id
+ Id int64 `json:"id" protobuf:"bytes,1,name=id"`
+ // Webhook refers to the configuration required to run an HTTP server
+ Webhook *webhook.Context `json:"webhook" protobuf:"bytes,2,name=webhook"`
+ // Owner refers to the GitHub owner name, e.g. argoproj
+ Owner string `json:"owner" protobuf:"bytes,3,name=owner"`
+ // Repository refers to the GitHub repo name, e.g. argo-events
+ Repository string `json:"repository" protobuf:"bytes,4,name=repository"`
+ // Events refer to the GitHub events to which the gateway will subscribe
+ // +listType=string
+ Events []string `json:"events" protobuf:"bytes,5,rep,name=events"`
+ // APIToken refers to a K8s secret containing github api token
+ APIToken *corev1.SecretKeySelector `json:"apiToken" protobuf:"bytes,6,name=apiToken"`
+ // WebhookSecret refers to a K8s secret containing the GitHub webhook secret
+ // https://developer.github.com/webhooks/securing/
+ // +optional
+ WebhookSecret *corev1.SecretKeySelector `json:"webhookSecret,omitempty" protobuf:"bytes,7,opt,name=webhookSecret"`
+ // Insecure tls verification
+ Insecure bool `json:"insecure,omitempty" protobuf:"bytes,8,opt,name=insecure"`
+ // Active refers to status of the webhook for event deliveries.
+ // https://developer.github.com/webhooks/creating/#active
+ // +optional
+ Active bool `json:"active,omitempty" protobuf:"bytes,9,opt,name=active"`
+ // ContentType of the event delivery
+ ContentType string `json:"contentType,omitempty" protobuf:"bytes,10,opt,name=contentType"`
+ // GitHub base URL (for GitHub Enterprise)
+ // +optional
+ GithubBaseURL string `json:"githubBaseURL,omitempty" protobuf:"bytes,11,opt,name=githubBaseURL"`
+ // GitHub upload URL (for GitHub Enterprise)
+ // +optional
+ GithubUploadURL string `json:"githubUploadURL,omitempty" protobuf:"bytes,12,opt,name=githubUploadURL"`
+ // Namespace refers to Kubernetes namespace which is used to retrieve webhook secret and api token from.
+ Namespace string `json:"namespace" protobuf:"bytes,13,name=namespace"`
+ // DeleteHookOnFinish determines whether to delete the GitHub hook for the repository once the event source is stopped.
+ // +optional
+ DeleteHookOnFinish bool `json:"deleteHookOnFinish,omitempty" protobuf:"bytes,14,opt,name=deleteHookOnFinish"`
+}
+
+// GitlabEventSource refers to event-source related to Gitlab events
+type GitlabEventSource struct {
+ // Webhook holds configuration to run an HTTP server
+ Webhook *webhook.Context `json:"webhook" protobuf:"bytes,1,name=webhook"`
+ // ProjectId is the id of the project for which the integration needs to be set up
+ ProjectId string `json:"projectId" protobuf:"bytes,2,name=projectId"`
+ // Event is a gitlab event to listen to.
+ // Refer https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794.
+ Event string `json:"event" protobuf:"bytes,3,name=event"`
+ // AccessToken is a reference to a k8s secret which holds the gitlab api access information
+ AccessToken *corev1.SecretKeySelector `json:"accessToken" protobuf:"bytes,4,name=accessToken"`
+ // EnableSSLVerification to enable ssl verification
+ // +optional
+ EnableSSLVerification bool `json:"enableSSLVerification,omitempty" protobuf:"bytes,5,opt,name=enableSSLVerification"`
+ // GitlabBaseURL is the base URL for API requests to a custom endpoint
+ GitlabBaseURL string `json:"gitlabBaseURL" protobuf:"bytes,6,name=gitlabBaseURL"`
+ // Namespace refers to Kubernetes namespace which is used to retrieve access token from.
+ Namespace string `json:"namespace" protobuf:"bytes,7,name=namespace"`
+ // DeleteHookOnFinish determines whether to delete the GitLab hook for the project once the event source is stopped.
+ // +optional
+ DeleteHookOnFinish bool `json:"deleteHookOnFinish,omitempty" protobuf:"bytes,8,opt,name=deleteHookOnFinish"`
+}
+
+// HDFSEventSource refers to event-source for HDFS related events
+type HDFSEventSource struct {
+ fsevent.WatchPathConfig `json:",inline"`
+ // Type of file operations to watch
+ Type string `json:"type"`
+ // CheckInterval is a string that describes an interval duration to check the directory state, e.g. 1s, 30m, 2h... (defaults to 1m)
+ CheckInterval string `json:"checkInterval,omitempty"`
+ // Addresses is accessible addresses of HDFS name nodes
+ // +listType=string
+ Addresses []string `json:"addresses"`
+ // HDFSUser is the user to access HDFS file system.
+ // It is ignored if either ccache or keytab is used.
+ HDFSUser string `json:"hdfsUser,omitempty"`
+ // KrbCCacheSecret is the secret selector for Kerberos ccache
+ // Either ccache or keytab can be set to use Kerberos.
+ KrbCCacheSecret *corev1.SecretKeySelector `json:"krbCCacheSecret,omitempty"`
+ // KrbKeytabSecret is the secret selector for Kerberos keytab
+ // Either ccache or keytab can be set to use Kerberos.
+ KrbKeytabSecret *corev1.SecretKeySelector `json:"krbKeytabSecret,omitempty"`
+ // KrbUsername is the Kerberos username used with Kerberos keytab
+ // It must be set if keytab is used.
+ KrbUsername string `json:"krbUsername,omitempty"`
+ // KrbRealm is the Kerberos realm used with Kerberos keytab
+ // It must be set if keytab is used.
+ KrbRealm string `json:"krbRealm,omitempty"`
+ // KrbConfig is the configmap selector for Kerberos config as string
+ // It must be set if either ccache or keytab is used.
+ KrbConfigConfigMap *corev1.ConfigMapKeySelector `json:"krbConfigConfigMap,omitempty"`
+ // KrbServicePrincipalName is the principal name of Kerberos service
+ // It must be set if either ccache or keytab is used.
+ KrbServicePrincipalName string `json:"krbServicePrincipalName,omitempty"`
+ // Namespace refers to Kubernetes namespace which is used to retrieve ccache secret and keytab secret from.
+ Namespace string `json:"namespace" protobuf:"bytes,1,name=namespace"`
+}
+
+// SlackEventSource refers to event-source for Slack related events
+type SlackEventSource struct {
+ // Slack App signing secret
+ SigningSecret *corev1.SecretKeySelector `json:"signingSecret,omitempty" protobuf:"bytes,1,opt,name=signingSecret"`
+ // Token for URL verification handshake
+ Token *corev1.SecretKeySelector `json:"token,omitempty" protobuf:"bytes,2,name=token"`
+ // Webhook holds configuration for a REST endpoint
+ Webhook *webhook.Context `json:"webhook" protobuf:"bytes,3,name=webhook"`
+ // Namespace refers to Kubernetes namespace which is used to retrieve token and signing secret from.
+ Namespace string `json:"namespace" protobuf:"bytes,4,name=namespace"`
+}
+
+// StorageGridEventSource refers to event-source for StorageGrid related events
+type StorageGridEventSource struct {
+ // Webhook holds configuration for a REST endpoint
+ Webhook *webhook.Context `json:"webhook" protobuf:"bytes,1,name=webhook"`
+ // Events are s3 bucket notification events.
+ // For more information on s3 notifications, follow https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+ // Note that storage grid notifications do not contain `s3:`
+ // +listType=string
+ Events []string `json:"events,omitempty" protobuf:"bytes,2,opt,name=events"`
+ // Filter on object key which caused the notification.
+ Filter *StorageGridFilter `json:"filter,omitempty" protobuf:"bytes,3,opt,name=filter"`
+}
+
+// StorageGridFilter represents filters to apply to bucket notifications for specifying constraints on objects
+// +k8s:openapi-gen=true
+type StorageGridFilter struct {
+ Prefix string `json:"prefix"`
+ Suffix string `json:"suffix"`
+}
+
+// EventSourceStatus holds the status of the event-source resource
+type EventSourceStatus struct {
+ CreatedAt metav1.Time `json:"createdAt,omitempty" protobuf:"bytes,1,opt,name=createdAt"`
+}
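+
+// Illustrative only: a hypothetical manifest showing how the map-based spec above is
+// expressed in YAML. The group/version follow the registered SchemeGroupVersion, and
+// the entry name "example-with-interval" plus the field values are made-up examples.
+//
+//	apiVersion: argoproj.io/v1alpha1
+//	kind: EventSource
+//	metadata:
+//	  name: calendar-event-source
+//	spec:
+//	  type: calendar
+//	  calendar:
+//	    example-with-interval:
+//	      interval: 10s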
diff --git a/gateways/community/gcp-pubsub/cmd/main.go b/pkg/apis/eventsources/v1alpha1/validate.go
similarity index 62%
rename from gateways/community/gcp-pubsub/cmd/main.go
rename to pkg/apis/eventsources/v1alpha1/validate.go
index 53dc11c4bd..46c1112d29 100644
--- a/gateways/community/gcp-pubsub/cmd/main.go
+++ b/pkg/apis/eventsources/v1alpha1/validate.go
@@ -13,17 +13,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
-
-package main
+package v1alpha1
import (
- "github.com/argoproj/argo-events/common"
- "github.com/argoproj/argo-events/gateways"
- "github.com/argoproj/argo-events/gateways/community/gcp-pubsub"
+ "github.com/pkg/errors"
)
-func main() {
- gateways.StartGateway(&pubsub.GcpPubSubEventSourceExecutor{
- Log: common.NewArgoEventsLogger(),
- })
+// ValidateEventSource validates a generic event source
+func ValidateEventSource(eventSource *EventSource) error {
+ if eventSource == nil {
+ return errors.New("event source can't be nil")
+ }
+ if eventSource.Spec == nil {
+ return errors.New("event source specification can't be nil")
+ }
+ return nil
}
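+
+// Illustrative usage sketch (hypothetical caller, not part of this change): gateway
+// code would typically validate the resource before processing it, e.g.
+//
+//	if err := ValidateEventSource(eventSource); err != nil {
+//		return errors.Wrap(err, "event source is not valid")
+//	}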
diff --git a/pkg/apis/eventsources/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/eventsources/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..78cd2fd942
--- /dev/null
+++ b/pkg/apis/eventsources/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,681 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ json "encoding/json"
+
+ common "github.com/argoproj/argo-events/common"
+ webhook "github.com/argoproj/argo-events/gateways/server/common/webhook"
+ apiscommon "github.com/argoproj/argo-events/pkg/apis/common"
+ v1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AMQPEventSource) DeepCopyInto(out *AMQPEventSource) {
+ *out = *in
+ if in.ConnectionBackoff != nil {
+ in, out := &in.ConnectionBackoff, &out.ConnectionBackoff
+ *out = new(common.Backoff)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AMQPEventSource.
+func (in *AMQPEventSource) DeepCopy() *AMQPEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(AMQPEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CalendarEventSource) DeepCopyInto(out *CalendarEventSource) {
+ *out = *in
+ if in.ExclusionDates != nil {
+ in, out := &in.ExclusionDates, &out.ExclusionDates
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UserPayload != nil {
+ in, out := &in.UserPayload, &out.UserPayload
+ *out = new(json.RawMessage)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalendarEventSource.
+func (in *CalendarEventSource) DeepCopy() *CalendarEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(CalendarEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSource) DeepCopyInto(out *EventSource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Status.DeepCopyInto(&out.Status)
+ if in.Spec != nil {
+ in, out := &in.Spec, &out.Spec
+ *out = new(EventSourceSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource.
+func (in *EventSource) DeepCopy() *EventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(EventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventSource) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSourceList) DeepCopyInto(out *EventSourceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EventSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceList.
+func (in *EventSourceList) DeepCopy() *EventSourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(EventSourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventSourceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSourceSpec) DeepCopyInto(out *EventSourceSpec) {
+ *out = *in
+ if in.Minio != nil {
+ in, out := &in.Minio, &out.Minio
+ *out = make(map[string]apiscommon.S3Artifact, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.Calendar != nil {
+ in, out := &in.Calendar, &out.Calendar
+ *out = make(map[string]CalendarEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.File != nil {
+ in, out := &in.File, &out.File
+ *out = make(map[string]FileEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Resource != nil {
+ in, out := &in.Resource, &out.Resource
+ *out = make(map[string]ResourceEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.Webhook != nil {
+ in, out := &in.Webhook, &out.Webhook
+ *out = make(map[string]webhook.Context, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.AMQP != nil {
+ in, out := &in.AMQP, &out.AMQP
+ *out = make(map[string]AMQPEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.Kafka != nil {
+ in, out := &in.Kafka, &out.Kafka
+ *out = make(map[string]KafkaEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.MQTT != nil {
+ in, out := &in.MQTT, &out.MQTT
+ *out = make(map[string]MQTTEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.NATS != nil {
+ in, out := &in.NATS, &out.NATS
+ *out = make(map[string]NATSEventsSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.SNS != nil {
+ in, out := &in.SNS, &out.SNS
+ *out = make(map[string]SNSEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.SQS != nil {
+ in, out := &in.SQS, &out.SQS
+ *out = make(map[string]SQSEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.PubSub != nil {
+ in, out := &in.PubSub, &out.PubSub
+ *out = make(map[string]PubSubEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Github != nil {
+ in, out := &in.Github, &out.Github
+ *out = make(map[string]GithubEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.Gitlab != nil {
+ in, out := &in.Gitlab, &out.Gitlab
+ *out = make(map[string]GitlabEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.HDFS != nil {
+ in, out := &in.HDFS, &out.HDFS
+ *out = make(map[string]HDFSEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.Slack != nil {
+ in, out := &in.Slack, &out.Slack
+ *out = make(map[string]SlackEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.StorageGrid != nil {
+ in, out := &in.StorageGrid, &out.StorageGrid
+ *out = make(map[string]StorageGridEventSource, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceSpec.
+func (in *EventSourceSpec) DeepCopy() *EventSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EventSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSourceStatus) DeepCopyInto(out *EventSourceStatus) {
+ *out = *in
+ in.CreatedAt.DeepCopyInto(&out.CreatedAt)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceStatus.
+func (in *EventSourceStatus) DeepCopy() *EventSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EventSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileEventSource) DeepCopyInto(out *FileEventSource) {
+ *out = *in
+ out.WatchPathConfig = in.WatchPathConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileEventSource.
+func (in *FileEventSource) DeepCopy() *FileEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(FileEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GithubEventSource) DeepCopyInto(out *GithubEventSource) {
+ *out = *in
+ if in.Webhook != nil {
+ in, out := &in.Webhook, &out.Webhook
+ *out = new(webhook.Context)
+ **out = **in
+ }
+ if in.Events != nil {
+ in, out := &in.Events, &out.Events
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.APIToken != nil {
+ in, out := &in.APIToken, &out.APIToken
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.WebhookSecret != nil {
+ in, out := &in.WebhookSecret, &out.WebhookSecret
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubEventSource.
+func (in *GithubEventSource) DeepCopy() *GithubEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(GithubEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitlabEventSource) DeepCopyInto(out *GitlabEventSource) {
+ *out = *in
+ if in.Webhook != nil {
+ in, out := &in.Webhook, &out.Webhook
+ *out = new(webhook.Context)
+ **out = **in
+ }
+ if in.AccessToken != nil {
+ in, out := &in.AccessToken, &out.AccessToken
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitlabEventSource.
+func (in *GitlabEventSource) DeepCopy() *GitlabEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(GitlabEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HDFSEventSource) DeepCopyInto(out *HDFSEventSource) {
+ *out = *in
+ out.WatchPathConfig = in.WatchPathConfig
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.KrbCCacheSecret != nil {
+ in, out := &in.KrbCCacheSecret, &out.KrbCCacheSecret
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KrbKeytabSecret != nil {
+ in, out := &in.KrbKeytabSecret, &out.KrbKeytabSecret
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.KrbConfigConfigMap != nil {
+ in, out := &in.KrbConfigConfigMap, &out.KrbConfigConfigMap
+ *out = new(v1.ConfigMapKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSEventSource.
+func (in *HDFSEventSource) DeepCopy() *HDFSEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(HDFSEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaEventSource) DeepCopyInto(out *KafkaEventSource) {
+ *out = *in
+ if in.ConnectionBackoff != nil {
+ in, out := &in.ConnectionBackoff, &out.ConnectionBackoff
+ *out = new(common.Backoff)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaEventSource.
+func (in *KafkaEventSource) DeepCopy() *KafkaEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(KafkaEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MQTTEventSource) DeepCopyInto(out *MQTTEventSource) {
+ *out = *in
+ if in.ConnectionBackoff != nil {
+ in, out := &in.ConnectionBackoff, &out.ConnectionBackoff
+ *out = new(common.Backoff)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MQTTEventSource.
+func (in *MQTTEventSource) DeepCopy() *MQTTEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(MQTTEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NATSEventsSource) DeepCopyInto(out *NATSEventsSource) {
+ *out = *in
+ if in.ConnectionBackoff != nil {
+ in, out := &in.ConnectionBackoff, &out.ConnectionBackoff
+ *out = new(common.Backoff)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NATSEventsSource.
+func (in *NATSEventsSource) DeepCopy() *NATSEventsSource {
+ if in == nil {
+ return nil
+ }
+ out := new(NATSEventsSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PubSubEventSource) DeepCopyInto(out *PubSubEventSource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PubSubEventSource.
+func (in *PubSubEventSource) DeepCopy() *PubSubEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(PubSubEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceEventSource) DeepCopyInto(out *ResourceEventSource) {
+ *out = *in
+ if in.Filter != nil {
+ in, out := &in.Filter, &out.Filter
+ *out = new(ResourceFilter)
+ (*in).DeepCopyInto(*out)
+ }
+ out.GroupVersionResource = in.GroupVersionResource
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceEventSource.
+func (in *ResourceEventSource) DeepCopy() *ResourceEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceFilter) DeepCopyInto(out *ResourceFilter) {
+ *out = *in
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Fields != nil {
+ in, out := &in.Fields, &out.Fields
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.CreatedBy.DeepCopyInto(&out.CreatedBy)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilter.
+func (in *ResourceFilter) DeepCopy() *ResourceFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceFilter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SNSEventSource) DeepCopyInto(out *SNSEventSource) {
+ *out = *in
+ if in.Webhook != nil {
+ in, out := &in.Webhook, &out.Webhook
+ *out = new(webhook.Context)
+ **out = **in
+ }
+ if in.AccessKey != nil {
+ in, out := &in.AccessKey, &out.AccessKey
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SecretKey != nil {
+ in, out := &in.SecretKey, &out.SecretKey
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SNSEventSource.
+func (in *SNSEventSource) DeepCopy() *SNSEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(SNSEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SQSEventSource) DeepCopyInto(out *SQSEventSource) {
+ *out = *in
+ if in.AccessKey != nil {
+ in, out := &in.AccessKey, &out.AccessKey
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SecretKey != nil {
+ in, out := &in.SecretKey, &out.SecretKey
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQSEventSource.
+func (in *SQSEventSource) DeepCopy() *SQSEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(SQSEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SlackEventSource) DeepCopyInto(out *SlackEventSource) {
+ *out = *in
+ if in.SigningSecret != nil {
+ in, out := &in.SigningSecret, &out.SigningSecret
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Token != nil {
+ in, out := &in.Token, &out.Token
+ *out = new(v1.SecretKeySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Webhook != nil {
+ in, out := &in.Webhook, &out.Webhook
+ *out = new(webhook.Context)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackEventSource.
+func (in *SlackEventSource) DeepCopy() *SlackEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(SlackEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageGridEventSource) DeepCopyInto(out *StorageGridEventSource) {
+ *out = *in
+ if in.Webhook != nil {
+ in, out := &in.Webhook, &out.Webhook
+ *out = new(webhook.Context)
+ **out = **in
+ }
+ if in.Events != nil {
+ in, out := &in.Events, &out.Events
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Filter != nil {
+ in, out := &in.Filter, &out.Filter
+ *out = new(StorageGridFilter)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageGridEventSource.
+func (in *StorageGridEventSource) DeepCopy() *StorageGridEventSource {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageGridEventSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageGridFilter) DeepCopyInto(out *StorageGridFilter) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageGridFilter.
+func (in *StorageGridFilter) DeepCopy() *StorageGridFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageGridFilter)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/apis/gateway/v1alpha1/openapi_generated.go b/pkg/apis/gateway/v1alpha1/openapi_generated.go
index 58aadfc89b..33be14ca54 100644
--- a/pkg/apis/gateway/v1alpha1/openapi_generated.go
+++ b/pkg/apis/gateway/v1alpha1/openapi_generated.go
@@ -22,15 +22,17 @@ limitations under the License.
package v1alpha1
import (
- "github.com/go-openapi/spec"
- "k8s.io/kube-openapi/pkg/common"
+ spec "github.com/go-openapi/spec"
+ common "k8s.io/kube-openapi/pkg/common"
)
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
return map[string]common.OpenAPIDefinition{
+ "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.EventSourceRef": schema_pkg_apis_gateway_v1alpha1_EventSourceRef(ref),
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.Gateway": schema_pkg_apis_gateway_v1alpha1_Gateway(ref),
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.GatewayList": schema_pkg_apis_gateway_v1alpha1_GatewayList(ref),
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.GatewayNotificationWatcher": schema_pkg_apis_gateway_v1alpha1_GatewayNotificationWatcher(ref),
+ "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.GatewayResource": schema_pkg_apis_gateway_v1alpha1_GatewayResource(ref),
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.GatewaySpec": schema_pkg_apis_gateway_v1alpha1_GatewaySpec(ref),
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.GatewayStatus": schema_pkg_apis_gateway_v1alpha1_GatewayStatus(ref),
"github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.NodeStatus": schema_pkg_apis_gateway_v1alpha1_NodeStatus(ref),
@@ -39,6 +41,34 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
}
}
+func schema_pkg_apis_gateway_v1alpha1_EventSourceRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "EventSourceRef holds information about the EventSourceRef custom resource",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name of the event source",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace of the event source Default value is the namespace where referencing gateway is deployed",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ }
+}
+
func schema_pkg_apis_gateway_v1alpha1_Gateway(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -178,6 +208,34 @@ func schema_pkg_apis_gateway_v1alpha1_GatewayNotificationWatcher(ref common.Refe
}
}
+func schema_pkg_apis_gateway_v1alpha1_GatewayResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "GatewayResource holds the metadata about the gateway resources",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "deployment": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Metadata of the deployment for the gateway",
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
+ },
+ },
+ "service": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Metadata of the service for the gateway",
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
+ },
+ },
+ },
+ Required: []string{"deployment"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
+ }
+}
+
func schema_pkg_apis_gateway_v1alpha1_GatewaySpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -191,11 +249,10 @@ func schema_pkg_apis_gateway_v1alpha1_GatewaySpec(ref common.ReferenceCallback)
Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"),
},
},
- "eventSource": {
+ "eventSourceRef": {
SchemaProps: spec.SchemaProps{
- Description: "EventSource is name of the configmap that stores event source configurations for the gateway",
- Type: []string{"string"},
- Format: "",
+ Description: "EventSourceRef refers to event-source that stores event source configurations for the gateway",
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.EventSourceRef"),
},
},
"type": {
@@ -208,7 +265,7 @@ func schema_pkg_apis_gateway_v1alpha1_GatewaySpec(ref common.ReferenceCallback)
"service": {
SchemaProps: spec.SchemaProps{
Description: "Service is the specifications of the service to expose the gateway Refer https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#service-v1-core",
- Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.ServiceTemplateSpec"),
+ Ref: ref("k8s.io/api/core/v1.Service"),
},
},
"watchers": {
@@ -230,12 +287,19 @@ func schema_pkg_apis_gateway_v1alpha1_GatewaySpec(ref common.ReferenceCallback)
Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.EventProtocol"),
},
},
+ "replica": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Replica is the gateway deployment replicas",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
},
Required: []string{"template", "type", "processorPort", "eventProtocol"},
},
},
Dependencies: []string{
- "github.com/argoproj/argo-events/pkg/apis/common.EventProtocol", "github.com/argoproj/argo-events/pkg/apis/common.ServiceTemplateSpec", "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.NotificationWatchers", "k8s.io/api/core/v1.PodTemplateSpec"},
+ "github.com/argoproj/argo-events/pkg/apis/common.EventProtocol", "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.EventSourceRef", "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.NotificationWatchers", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/api/core/v1.Service"},
}
}
@@ -280,12 +344,18 @@ func schema_pkg_apis_gateway_v1alpha1_GatewayStatus(ref common.ReferenceCallback
},
},
},
+ "resources": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Resources refers to the metadata about the gateway resources",
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.GatewayResource"),
+ },
+ },
},
- Required: []string{"phase"},
+ Required: []string{"phase", "resources"},
},
},
Dependencies: []string{
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.NodeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.GatewayResource", "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1.NodeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
diff --git a/pkg/apis/gateway/v1alpha1/types.go b/pkg/apis/gateway/v1alpha1/types.go
index 3e1c652f4f..7f4a69befe 100644
--- a/pkg/apis/gateway/v1alpha1/types.go
+++ b/pkg/apis/gateway/v1alpha1/types.go
@@ -17,14 +17,11 @@ limitations under the License.
package v1alpha1
import (
- "github.com/argoproj/argo-events/pkg/apis/common"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-// Gateway version
-const ArgoEventsGatewayVersion = "v0.11"
-
// NodePhase is the label for the condition of a node.
type NodePhase string
@@ -63,44 +60,58 @@ type GatewaySpec struct {
// Template is the pod specification for the gateway
// Refer https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#pod-v1-core
Template *corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"`
-
- // EventSource is name of the configmap that stores event source configurations for the gateway
- EventSource string `json:"eventSource,omitempty" protobuf:"bytes,2,opt,name=eventSource"`
-
+ // EventSourceRef refers to the event-source that stores event source configurations for the gateway
+ EventSourceRef *EventSourceRef `json:"eventSourceRef,omitempty" protobuf:"bytes,2,opt,name=eventSourceRef"`
// Type is the type of gateway. Used as metadata.
- Type string `json:"type" protobuf:"bytes,3,opt,name=type"`
-
+ Type apicommon.EventSourceType `json:"type" protobuf:"bytes,3,opt,name=type"`
// Service is the specifications of the service to expose the gateway
// Refer https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#service-v1-core
- Service *common.ServiceTemplateSpec `json:"service,omitempty" protobuf:"bytes,4,opt,name=service"`
-
+ Service *corev1.Service `json:"service,omitempty" protobuf:"bytes,4,opt,name=service"`
// Watchers are components which are interested listening to notifications from this gateway
// These only need to be specified when gateway dispatch mechanism is through HTTP POST notifications.
// In future, support for NATS, KAFKA will be added as a means to dispatch notifications in which case
// specifying watchers would be unnecessary.
Watchers *NotificationWatchers `json:"watchers,omitempty" protobuf:"bytes,5,opt,name=watchers"`
-
// Port on which the gateway event source processor is running on.
ProcessorPort string `json:"processorPort" protobuf:"bytes,6,opt,name=processorPort"`
-
// EventProtocol is the underlying protocol used to send events from gateway to watchers(components interested in listening to event from this gateway)
- EventProtocol *common.EventProtocol `json:"eventProtocol" protobuf:"bytes,7,opt,name=eventProtocol"`
+ EventProtocol *apicommon.EventProtocol `json:"eventProtocol" protobuf:"bytes,7,opt,name=eventProtocol"`
+ // Replica is the number of gateway deployment replicas
+ Replica int `json:"replica,omitempty" protobuf:"bytes,9,opt,name=replica"`
+}
+
+// EventSourceRef holds information about the EventSource custom resource
+type EventSourceRef struct {
+ // Name of the event source
+ Name string `json:"name" protobuf:"bytes,1,name=name"`
+ // Namespace of the event source.
+ // Default value is the namespace where the referencing gateway is deployed
+ // +optional
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
+}
+
+// GatewayResource holds the metadata about the gateway resources
+type GatewayResource struct {
+ // Metadata of the deployment for the gateway
+ Deployment *metav1.ObjectMeta `json:"deployment" protobuf:"bytes,1,name=deployment"`
+ // Metadata of the service for the gateway
+ // +optional
+ Service *metav1.ObjectMeta `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"`
}
// GatewayStatus contains information about the status of a gateway.
type GatewayStatus struct {
// Phase is the high-level summary of the gateway
Phase NodePhase `json:"phase" protobuf:"bytes,1,opt,name=phase"`
-
// StartedAt is the time at which this gateway was initiated
StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,2,opt,name=startedAt"`
-
// Message is a human readable string indicating details about a gateway in its phase
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
-
// Nodes is a mapping between a node ID and the node's status
// it records the states for the configurations of gateway.
Nodes map[string]NodeStatus `json:"nodes,omitempty" protobuf:"bytes,5,rep,name=nodes"`
+ // Resources refers to the metadata about the gateway resources
+ Resources *GatewayResource `json:"resources" protobuf:"bytes,6,opt,name=resources"`
}
// NodeStatus describes the status for an individual node in the gateway configurations.
@@ -109,23 +120,17 @@ type NodeStatus struct {
// ID is a unique identifier of a node within a sensor
// It is a hash of the node name
ID string `json:"id" protobuf:"bytes,1,opt,name=id"`
-
// Name is a unique name in the node tree used to generate the node ID
Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
-
// DisplayName is the human readable representation of the node
DisplayName string `json:"displayName" protobuf:"bytes,5,opt,name=displayName"`
-
// Phase of the node
Phase NodePhase `json:"phase" protobuf:"bytes,6,opt,name=phase"`
-
// StartedAt is the time at which this node started
// +k8s:openapi-gen=false
StartedAt metav1.MicroTime `json:"startedAt,omitempty" protobuf:"bytes,7,opt,name=startedAt"`
-
// Message store data or something to save for configuration
Message string `json:"message,omitempty" protobuf:"bytes,8,opt,name=message"`
-
// UpdateTime is the time when node(gateway configuration) was updated
UpdateTime metav1.MicroTime `json:"updateTime,omitempty" protobuf:"bytes,9,opt,name=updateTime"`
}
@@ -135,7 +140,6 @@ type NotificationWatchers struct {
// +listType=gateways
// Gateways is the list of gateways interested in listening to notifications from this gateway
Gateways []GatewayNotificationWatcher `json:"gateways,omitempty" protobuf:"bytes,1,opt,name=gateways"`
-
// +listType=sensors
// Sensors is the list of sensors interested in listening to notifications from this gateway
Sensors []SensorNotificationWatcher `json:"sensors,omitempty" protobuf:"bytes,2,rep,name=sensors"`
@@ -145,14 +149,11 @@ type NotificationWatchers struct {
type GatewayNotificationWatcher struct {
// Name is the gateway name
Name string `json:"name" protobuf:"bytes,1,name=name"`
-
// Port is http server port on which gateway is running
Port string `json:"port" protobuf:"bytes,2,name=port"`
-
// Endpoint is REST API endpoint to post event to.
// Events are sent using HTTP POST method to this endpoint.
Endpoint string `json:"endpoint" protobuf:"bytes,3,name=endpoint"`
-
// Namespace of the gateway
// +Optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
@@ -162,7 +163,6 @@ type GatewayNotificationWatcher struct {
type SensorNotificationWatcher struct {
// Name is the name of the sensor
Name string `json:"name" protobuf:"bytes,1,name=name"`
-
// Namespace of the sensor
// +Optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
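
A minimal, illustrative sketch (not part of the patch) of how the reworked GatewaySpec is populated: the gateway now points at an EventSource custom resource through eventSourceRef instead of naming a configmap, and replica controls the deployment size. The webhook values, the port, and the assumption that EventSourceType is a string-based type are illustrative only.

package main

import (
	"fmt"

	apicommon "github.com/argoproj/argo-events/pkg/apis/common"
	gwv1alpha1 "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
)

func main() {
	spec := gwv1alpha1.GatewaySpec{
		// Type is assumed to be convertible from a plain string.
		Type:          apicommon.EventSourceType("webhook"),
		ProcessorPort: "9330",
		Replica:       2,
		EventSourceRef: &gwv1alpha1.EventSourceRef{
			Name: "webhook-event-source",
			// Namespace omitted: it defaults to the namespace of the gateway itself.
		},
	}
	fmt.Printf("type=%s replicas=%d eventSourceRef=%s\n", spec.Type, spec.Replica, spec.EventSourceRef.Name)
}
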
diff --git a/pkg/apis/gateway/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/gateway/v1alpha1/zz_generated.deepcopy.go
index 5b006677b8..c0a8582ecf 100644
--- a/pkg/apis/gateway/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/gateway/v1alpha1/zz_generated.deepcopy.go
@@ -20,11 +20,28 @@ limitations under the License.
package v1alpha1
import (
- "github.com/argoproj/argo-events/pkg/apis/common"
- v1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/runtime"
+ common "github.com/argoproj/argo-events/pkg/apis/common"
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSourceRef) DeepCopyInto(out *EventSourceRef) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceRef.
+func (in *EventSourceRef) DeepCopy() *EventSourceRef {
+ if in == nil {
+ return nil
+ }
+ out := new(EventSourceRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Gateway) DeepCopyInto(out *Gateway) {
*out = *in
@@ -102,17 +119,48 @@ func (in *GatewayNotificationWatcher) DeepCopy() *GatewayNotificationWatcher {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GatewayResource) DeepCopyInto(out *GatewayResource) {
+ *out = *in
+ if in.Deployment != nil {
+ in, out := &in.Deployment, &out.Deployment
+ *out = new(v1.ObjectMeta)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Service != nil {
+ in, out := &in.Service, &out.Service
+ *out = new(v1.ObjectMeta)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayResource.
+func (in *GatewayResource) DeepCopy() *GatewayResource {
+ if in == nil {
+ return nil
+ }
+ out := new(GatewayResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) {
*out = *in
if in.Template != nil {
in, out := &in.Template, &out.Template
- *out = new(v1.PodTemplateSpec)
+ *out = new(corev1.PodTemplateSpec)
(*in).DeepCopyInto(*out)
}
+ if in.EventSourceRef != nil {
+ in, out := &in.EventSourceRef, &out.EventSourceRef
+ *out = new(EventSourceRef)
+ **out = **in
+ }
if in.Service != nil {
in, out := &in.Service, &out.Service
- *out = new(common.ServiceTemplateSpec)
+ *out = new(corev1.Service)
(*in).DeepCopyInto(*out)
}
if in.Watchers != nil {
@@ -149,6 +197,11 @@ func (in *GatewayStatus) DeepCopyInto(out *GatewayStatus) {
(*out)[key] = *val.DeepCopy()
}
}
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(GatewayResource)
+ (*in).DeepCopyInto(*out)
+ }
return
}
diff --git a/pkg/apis/sensor/v1alpha1/openapi_generated.go b/pkg/apis/sensor/v1alpha1/openapi_generated.go
index e167bf7c61..281f67055b 100644
--- a/pkg/apis/sensor/v1alpha1/openapi_generated.go
+++ b/pkg/apis/sensor/v1alpha1/openapi_generated.go
@@ -42,6 +42,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.NodeStatus": schema_pkg_apis_sensor_v1alpha1_NodeStatus(ref),
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Sensor": schema_pkg_apis_sensor_v1alpha1_Sensor(ref),
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorList": schema_pkg_apis_sensor_v1alpha1_SensorList(ref),
+ "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorResources": schema_pkg_apis_sensor_v1alpha1_SensorResources(ref),
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorSpec": schema_pkg_apis_sensor_v1alpha1_SensorSpec(ref),
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorStatus": schema_pkg_apis_sensor_v1alpha1_SensorStatus(ref),
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TimeFilter": schema_pkg_apis_sensor_v1alpha1_TimeFilter(ref),
@@ -60,43 +61,43 @@ func schema_pkg_apis_sensor_v1alpha1_ArtifactLocation(ref common.ReferenceCallba
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "ArtifactLocation describes the source location for an external artifact",
+ Description: "ArtifactLocation describes the source location for an external minio",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"s3": {
SchemaProps: spec.SchemaProps{
- Description: "S3 compliant artifact",
+ Description: "S3 compliant minio",
Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.S3Artifact"),
},
},
"inline": {
SchemaProps: spec.SchemaProps{
- Description: "Inline artifact is embedded in sensor spec as a string",
+ Description: "Inline minio is embedded in sensor spec as a string",
Type: []string{"string"},
Format: "",
},
},
"file": {
SchemaProps: spec.SchemaProps{
- Description: "File artifact is artifact stored in a file",
+ Description: "File minio is minio stored in a file",
Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.FileArtifact"),
},
},
"url": {
SchemaProps: spec.SchemaProps{
- Description: "URL to fetch the artifact from",
+ Description: "URL to fetch the minio from",
Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.URLArtifact"),
},
},
"configmap": {
SchemaProps: spec.SchemaProps{
- Description: "Configmap that stores the artifact",
+ Description: "Configmap that stores the minio",
Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ConfigmapArtifact"),
},
},
"git": {
SchemaProps: spec.SchemaProps{
- Description: "Git repository hosting the artifact",
+ Description: "Git repository hosting the minio",
Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.GitArtifact"),
},
},
@@ -160,7 +161,7 @@ func schema_pkg_apis_sensor_v1alpha1_ConfigmapArtifact(ref common.ReferenceCallb
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "ConfigmapArtifact contains information about artifact in k8 configmap",
+ Description: "ConfigmapArtifact contains information about minio in k8 configmap",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
@@ -371,7 +372,7 @@ func schema_pkg_apis_sensor_v1alpha1_FileArtifact(ref common.ReferenceCallback)
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "FileArtifact contains information about an artifact in a filesystem",
+ Description: "FileArtifact contains information about an minio in a filesystem",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"path": {
@@ -390,7 +391,7 @@ func schema_pkg_apis_sensor_v1alpha1_GitArtifact(ref common.ReferenceCallback) c
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "GitArtifact contains information about an artifact stored in git",
+ Description: "GitArtifact contains information about an minio stored in git",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"url": {
@@ -709,6 +710,34 @@ func schema_pkg_apis_sensor_v1alpha1_SensorList(ref common.ReferenceCallback) co
}
}
+func schema_pkg_apis_sensor_v1alpha1_SensorResources(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "SensorResources holds the metadata of the resources created for the sensor",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "deployment": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Deployment holds the metadata of the deployment for the sensor",
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
+ },
+ },
+ "service": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Service holds the metadata of the service for the sensor",
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
+ },
+ },
+ },
+ Required: []string{"deployment"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
+ }
+}
+
func schema_pkg_apis_sensor_v1alpha1_SensorSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -872,12 +901,18 @@ func schema_pkg_apis_sensor_v1alpha1_SensorStatus(ref common.ReferenceCallback)
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
+ "resources": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Resources refers to metadata of the resources created for the sensor",
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorResources"),
+ },
+ },
},
- Required: []string{"phase", "triggerCycleStatus", "lastCycleTime"},
+ Required: []string{"phase", "triggerCycleStatus", "lastCycleTime", "resources"},
},
},
Dependencies: []string{
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.NodeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.NodeStatus", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorResources", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
@@ -1212,7 +1247,7 @@ func schema_pkg_apis_sensor_v1alpha1_URLArtifact(ref common.ReferenceCallback) c
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "URLArtifact contains information about an artifact at an http endpoint.",
+ Description: "URLArtifact contains information about an minio at an http endpoint.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"path": {
diff --git a/pkg/apis/sensor/v1alpha1/types.go b/pkg/apis/sensor/v1alpha1/types.go
index 0182c247a3..be482ca4f1 100644
--- a/pkg/apis/sensor/v1alpha1/types.go
+++ b/pkg/apis/sensor/v1alpha1/types.go
@@ -28,8 +28,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-const ArgoEventsSensorVersion = "v0.11"
-
// NotificationType represent a type of notifications that are handled by a sensor
type NotificationType string
@@ -98,24 +96,18 @@ type SensorSpec struct {
// +listType=dependencies
// Dependencies is a list of the events that this sensor is dependent on.
Dependencies []EventDependency `json:"dependencies" protobuf:"bytes,1,rep,name=dependencies"`
-
// +listType=triggers
// Triggers is a list of the things that this sensor evokes. These are the outputs from this sensor.
Triggers []Trigger `json:"triggers" protobuf:"bytes,2,rep,name=triggers"`
-
// Template contains sensor pod specification. For more information, read https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#pod-v1-core
Template *corev1.PodTemplateSpec `json:"template" protobuf:"bytes,3,name=template"`
-
// EventProtocol is the protocol through which sensor receives events from gateway
EventProtocol *apicommon.EventProtocol `json:"eventProtocol" protobuf:"bytes,4,name=eventProtocol"`
-
// Circuit is a boolean expression of dependency groups
Circuit string `json:"circuit,omitempty" protobuf:"bytes,5,rep,name=circuit"`
-
// +listType=dependencyGroups
// DependencyGroups is a list of the groups of events.
DependencyGroups []DependencyGroup `json:"dependencyGroups,omitempty" protobuf:"bytes,6,rep,name=dependencyGroups"`
-
// ErrorOnFailedRound if set to true, marks sensor state as `error` if the previous trigger round fails.
// Once sensor state is set to `error`, no further triggers will be processed.
ErrorOnFailedRound bool `json:"errorOnFailedRound,omitempty" protobuf:"bytes,7,opt,name=errorOnFailedRound"`
@@ -125,10 +117,8 @@ type SensorSpec struct {
type EventDependency struct {
// Name is a unique name of this dependency
Name string `json:"name" protobuf:"bytes,1,name=name"`
-
// Filters and rules governing tolerations of success and constraints on the context and data of an event
Filters EventDependencyFilter `json:"filters,omitempty" protobuf:"bytes,2,opt,name=filters"`
-
// Connected tells if subscription is already setup in case of nats protocol.
Connected bool `json:"connected,omitempty" protobuf:"bytes,3,opt,name=connected"`
}
@@ -146,13 +136,10 @@ type DependencyGroup struct {
type EventDependencyFilter struct {
// Name is the name of event filter
Name string `json:"name" protobuf:"bytes,1,name=name"`
-
// Time filter on the event with escalation
Time *TimeFilter `json:"time,omitempty" protobuf:"bytes,2,opt,name=time"`
-
// Context filter constraints with escalation
Context *common.EventContext `json:"context,omitempty" protobuf:"bytes,3,opt,name=context"`
-
// +listType=data
// Data filter constraints with escalation
Data []DataFilter `json:"data,omitempty" protobuf:"bytes,4,opt,name=data"`
@@ -167,7 +154,6 @@ type TimeFilter struct {
// Before this time, events for this event are ignored and
// format is hh:mm:ss
Start string `json:"start,omitempty" protobuf:"bytes,1,opt,name=start"`
-
// StopPattern is the end of a time window.
// After this time, events for this event are ignored and
// format is hh:mm:ss
@@ -193,15 +179,13 @@ type DataFilter struct {
// To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\\'.
// See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
-
// Type contains the JSON type of the data
Type JSONType `json:"type" protobuf:"bytes,2,opt,name=type"`
-
// +listType=value
// Value is the allowed string values for this key
// Booleans are passed using strconv.ParseBool()
// Numbers are parsed using as float64 using strconv.ParseFloat()
- // Strings are treated as regular expressions
+ // Strings are taken as is
// Nils this value is ignored
Value []string `json:"value" protobuf:"bytes,3,rep,name=value"`
}
@@ -210,15 +194,12 @@ type DataFilter struct {
type Trigger struct {
// Template describes the trigger specification.
Template *TriggerTemplate `json:"template" protobuf:"bytes,1,name=template"`
-
// +listType=templateParameters
// TemplateParameters is the list of resource parameters to pass to the template object
TemplateParameters []TriggerParameter `json:"templateParameters,omitempty" protobuf:"bytes,2,rep,name=templateParameters"`
-
// +listType=resourceParameters
// ResourceParameters is the list of resource parameters to pass to resolved resource object in template object
ResourceParameters []TriggerParameter `json:"resourceParameters,omitempty" protobuf:"bytes,3,rep,name=resourceParameters"`
-
// Policy to configure backoff and execution criteria for the trigger
Policy *TriggerPolicy `json:"policy" protobuf:"bytes,4,opt,name=policy"`
}
@@ -227,13 +208,10 @@ type Trigger struct {
type TriggerTemplate struct {
// Name is a unique name of the action to take
Name string `json:"name" protobuf:"bytes,1,name=name"`
-
// When is the condition to execute the trigger
When *TriggerCondition `json:"when,omitempty" protobuf:"bytes,2,opt,name=when"`
-
// The unambiguous kind of this object - used in order to retrieve the appropriate kubernetes api client for this resource
*metav1.GroupVersionResource `json:",inline" protobuf:"bytes,3,opt,name=groupVersionResource"`
-
// Source of the K8 resource file(s)
Source *ArtifactLocation `json:"source" protobuf:"bytes,4,opt,name=source"`
}
@@ -244,7 +222,6 @@ type TriggerCondition struct {
// +listType=any
// Any acts as a OR operator between dependencies
Any []string `json:"any,omitempty" protobuf:"bytes,1,rep,name=any"`
-
// +listType=all
// All acts as a AND operator between dependencies
All []string `json:"all,omitempty" protobuf:"bytes,2,rep,name=all"`
@@ -269,13 +246,11 @@ const (
type TriggerParameter struct {
// Src contains a source reference to the value of the parameter from a event event
Src *TriggerParameterSource `json:"src" protobuf:"bytes,1,name=src"`
-
// Dest is the JSONPath of a resource key.
// A path is a series of keys separated by a dot. The colon character can be escaped with '.'
// The -1 key can be used to append a value to an existing array.
// See https://github.com/tidwall/sjson#path-syntax for more information about how this is used.
Dest string `json:"dest" protobuf:"bytes,2,name=dest"`
-
// Operation is what to do with the existing value at Dest, whether to
// 'prepend', 'overwrite', or 'append' it.
Operation TriggerParameterOperation `json:"operation,omitempty" protobuf:"bytes,3,opt,name=operation"`
@@ -285,13 +260,11 @@ type TriggerParameter struct {
type TriggerParameterSource struct {
// Event is the name of the event for which to retrieve this event
Event string `json:"event" protobuf:"bytes,1,opt,name=event"`
-
// Path is the JSONPath of the event's (JSON decoded) data key
// Path is a series of keys separated by a dot. A key may contain wildcard characters '*' and '?'.
// To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\\'.
// See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
-
// Value is the default literal value to use for this parameter source
// This is only used if the path is invalid.
// If the path is invalid and this is not defined, this param source will produce an error.
@@ -302,10 +275,8 @@ type TriggerParameterSource struct {
type TriggerPolicy struct {
// Backoff before checking resource state
Backoff Backoff `json:"backoff" protobuf:"bytes,1,opt,name=backoff"`
-
// State refers to labels used to check the resource state
State *TriggerStateLabels `json:"state" protobuf:"bytes,2,opt,name=state"`
-
// ErrorOnBackoffTimeout determines whether sensor should transition to error state if the backoff times out and yet the resource neither transitioned into success or failure.
ErrorOnBackoffTimeout bool `json:"errorOnBackoffTimeout" protobuf:"bytes,3,opt,name=errorOnBackoffTimeout"`
}
@@ -314,13 +285,10 @@ type TriggerPolicy struct {
type Backoff struct {
// Duration is the duration in nanoseconds
Duration time.Duration `json:"duration" protobuf:"bytes,1,opt,name=duration"`
-
// Duration is multiplied by factor each iteration
Factor float64 `json:"factor" protobuf:"bytes,2,opt,name=factor"`
-
// The amount of jitter applied each iteration
Jitter float64 `json:"jitter" protobuf:"bytes,3,opt,name=jitter"`
-
// Exit with error after this many steps
Steps int `json:"steps" protobuf:"bytes,4,opt,name=steps"`
}
@@ -329,37 +297,40 @@ type Backoff struct {
type TriggerStateLabels struct {
// Success defines labels required to identify a resource in success state
Success map[string]string `json:"success" protobuf:"bytes,1,opt,name=success"`
-
// Failure defines labels required to identify a resource in failed state
Failure map[string]string `json:"failure" protobuf:"bytes,2,opt,name=failure"`
}
+// SensorResources holds the metadata of the resources created for the sensor
+type SensorResources struct {
+ // Deployment holds the metadata of the deployment for the sensor
+ Deployment *metav1.ObjectMeta `json:"deployment" protobuf:"bytes,1,name=deployment"`
+ // Service holds the metadata of the service for the sensor
+ // +optional
+ Service *metav1.ObjectMeta `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"`
+}
+
// SensorStatus contains information about the status of a sensor.
type SensorStatus struct {
// Phase is the high-level summary of the sensor
Phase NodePhase `json:"phase" protobuf:"bytes,1,opt,name=phase"`
-
// StartedAt is the time at which this sensor was initiated
StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,2,opt,name=startedAt"`
-
// CompletedAt is the time at which this sensor was completed
CompletedAt metav1.Time `json:"completedAt,omitempty" protobuf:"bytes,3,opt,name=completedAt"`
-
// Message is a human readable string indicating details about a sensor in its phase
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
-
// Nodes is a mapping between a node ID and the node's status
// it records the states for the FSM of this sensor.
Nodes map[string]NodeStatus `json:"nodes,omitempty" protobuf:"bytes,5,rep,name=nodes"`
-
// TriggerCycleCount is the count of sensor's trigger cycle runs.
TriggerCycleCount int32 `json:"triggerCycleCount,omitempty" protobuf:"varint,6,opt,name=triggerCycleCount"`
-
// TriggerCycleState is the status from last cycle of triggers execution.
TriggerCycleStatus TriggerCycleState `json:"triggerCycleStatus" protobuf:"bytes,7,opt,name=triggerCycleStatus"`
-
// LastCycleTime is the time when last trigger cycle completed
LastCycleTime metav1.Time `json:"lastCycleTime" protobuf:"bytes,8,opt,name=lastCycleTime"`
+ // Resources refers to the metadata of the resources created for the sensor
+ Resources *SensorResources `json:"resources" protobuf:"bytes,9,name=resources"`
}
// NodeStatus describes the status for an individual node in the sensor's FSM.
@@ -368,120 +339,94 @@ type NodeStatus struct {
// ID is a unique identifier of a node within a sensor
// It is a hash of the node name
ID string `json:"id" protobuf:"bytes,1,opt,name=id"`
-
// Name is a unique name in the node tree used to generate the node ID
Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
-
// DisplayName is the human readable representation of the node
DisplayName string `json:"displayName" protobuf:"bytes,3,opt,name=displayName"`
-
// Type is the type of the node
Type NodeType `json:"type" protobuf:"bytes,4,opt,name=type"`
-
// Phase of the node
Phase NodePhase `json:"phase" protobuf:"bytes,5,opt,name=phase"`
-
// StartedAt is the time at which this node started
StartedAt metav1.MicroTime `json:"startedAt,omitempty" protobuf:"bytes,6,opt,name=startedAt"`
-
// CompletedAt is the time at which this node completed
CompletedAt metav1.MicroTime `json:"completedAt,omitempty" protobuf:"bytes,7,opt,name=completedAt"`
-
// store data or something to save for event notifications or trigger events
Message string `json:"message,omitempty" protobuf:"bytes,8,opt,name=message"`
-
// Event stores the last seen event for this node
Event *apicommon.Event `json:"event,omitempty" protobuf:"bytes,9,opt,name=event"`
}
-// ArtifactLocation describes the source location for an external artifact
+// ArtifactLocation describes the source location for an external artifact
type ArtifactLocation struct {
- // S3 compliant artifact
+ // S3 compliant artifact
S3 *apicommon.S3Artifact `json:"s3,omitempty" protobuf:"bytes,1,opt,name=s3"`
-
- // Inline artifact is embedded in sensor spec as a string
+ // Inline artifact is embedded in sensor spec as a string
Inline *string `json:"inline,omitempty" protobuf:"bytes,2,opt,name=inline"`
-
- // File artifact is artifact stored in a file
+ // File artifact is an artifact stored in a file
File *FileArtifact `json:"file,omitempty" protobuf:"bytes,3,opt,name=file"`
-
- // URL to fetch the artifact from
+ // URL to fetch the artifact from
URL *URLArtifact `json:"url,omitempty" protobuf:"bytes,4,opt,name=url"`
-
- // Configmap that stores the artifact
+ // Configmap that stores the artifact
Configmap *ConfigmapArtifact `json:"configmap,omitempty" protobuf:"bytes,5,opt,name=configmap"`
-
- // Git repository hosting the artifact
+ // Git repository hosting the artifact
Git *GitArtifact `json:"git,omitempty" protobuf:"bytes,6,opt,name=git"`
-
// Resource is generic template for K8s resource
Resource *unstructured.Unstructured `json:"resource,omitempty" protobuf:"bytes,7,opt,name=resource"`
}
-// ConfigmapArtifact contains information about artifact in k8 configmap
+// ConfigmapArtifact contains information about an artifact in a k8s configmap
type ConfigmapArtifact struct {
// Name of the configmap
Name string `json:"name" protobuf:"bytes,1,name=name"`
-
// Namespace where configmap is deployed
Namespace string `json:"namespace" protobuf:"bytes,2,name=namespace"`
-
// Key within configmap data which contains trigger resource definition
Key string `json:"key" protobuf:"bytes,3,name=key"`
}
-// FileArtifact contains information about an artifact in a filesystem
+// FileArtifact contains information about an artifact in a filesystem
type FileArtifact struct {
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
-// URLArtifact contains information about an artifact at an http endpoint.
+// URLArtifact contains information about an artifact at an http endpoint.
type URLArtifact struct {
// Path is the complete URL
Path string `json:"path" protobuf:"bytes,1,name=path"`
-
// VerifyCert decides whether the connection is secure or not
VerifyCert bool `json:"verifyCert,omitempty" protobuf:"bytes,2,opt,name=verifyCert"`
}
-// GitArtifact contains information about an artifact stored in git
+// GitArtifact contains information about an artifact stored in git
type GitArtifact struct {
// Git URL
URL string `json:"url" protobuf:"bytes,1,name=url"`
-
// Directory to clone the repository. We clone complete directory because GitArtifact is not limited to any specific Git service providers.
// Hence we don't use any specific git provider client.
CloneDirectory string `json:"cloneDirectory" protobuf:"bytes,2,name=cloneDirectory"`
-
// Creds contain reference to git username and password
// +optional
Creds *GitCreds `json:"creds,omitempty" protobuf:"bytes,3,opt,name=creds"`
-
// Namespace where creds are stored.
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
-
// SSHKeyPath is path to your ssh key path. Use this if you don't want to provide username and password.
// ssh key path must be mounted in sensor pod.
// +optional
SSHKeyPath string `json:"sshKeyPath,omitempty" protobuf:"bytes,5,opt,name=sshKeyPath"`
-
// Path to file that contains trigger resource definition
FilePath string `json:"filePath" protobuf:"bytes,6,name=filePath"`
-
// Branch to use to pull trigger resource
// +optional
Branch string `json:"branch,omitempty" protobuf:"bytes,7,opt,name=branch"`
-
// Tag to use to pull trigger resource
// +optional
Tag string `json:"tag,omitempty" protobuf:"bytes,8,opt,name=tag"`
-
// Ref to use to pull trigger resource. Will result in a shallow clone and
// fetch.
// +optional
Ref string `json:"ref,omitempty" protobuf:"bytes,9,opt,name=ref"`
-
// Remote to manage set of tracked repositories. Defaults to "origin".
// Refer https://git-scm.com/docs/git-remote
// +optional
@@ -492,7 +437,6 @@ type GitArtifact struct {
type GitRemoteConfig struct {
// Name of the remote to fetch from.
Name string `json:"name" protobuf:"bytes,1,name=name"`
-
// +listType=urls
// URLs the URLs of a remote repository. It must be non-empty. Fetch will
// always use the first URL, while push will use all of them.
@@ -505,7 +449,7 @@ type GitCreds struct {
Password *corev1.SecretKeySelector `json:"password" protobuf:"bytes,2,opt,name=password"`
}
-// HasLocation whether or not an artifact has a location defined
+// HasLocation whether or not a minio has a location defined
func (a *ArtifactLocation) HasLocation() bool {
return a.S3 != nil || a.Inline != nil || a.File != nil || a.URL != nil
}
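
For reference, a minimal sketch (not part of this change) of supplying a trigger definition inline and checking it with the HasLocation helper above; note that HasLocation only inspects the S3, Inline, File and URL fields:

package main

import (
	"fmt"

	"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
)

func main() {
	// Hypothetical inline trigger resource definition, for illustration only.
	spec := `apiVersion: argoproj.io/v1alpha1
kind: Workflow`

	loc := v1alpha1.ArtifactLocation{Inline: &spec}

	// Prints "true": Inline is one of the fields HasLocation checks.
	fmt.Println(loc.HasLocation())
}
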
diff --git a/pkg/apis/sensor/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/sensor/v1alpha1/zz_generated.deepcopy.go
index e638866a17..89167cf8c8 100644
--- a/pkg/apis/sensor/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/sensor/v1alpha1/zz_generated.deepcopy.go
@@ -20,10 +20,10 @@ limitations under the License.
package v1alpha1
import (
- "github.com/argoproj/argo-events/pkg/apis/common"
+ common "github.com/argoproj/argo-events/pkg/apis/common"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
+ runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -373,6 +373,32 @@ func (in *SensorList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SensorResources) DeepCopyInto(out *SensorResources) {
+ *out = *in
+ if in.Deployment != nil {
+ in, out := &in.Deployment, &out.Deployment
+ *out = new(metav1.ObjectMeta)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Service != nil {
+ in, out := &in.Service, &out.Service
+ *out = new(metav1.ObjectMeta)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SensorResources.
+func (in *SensorResources) DeepCopy() *SensorResources {
+ if in == nil {
+ return nil
+ }
+ out := new(SensorResources)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SensorSpec) DeepCopyInto(out *SensorSpec) {
*out = *in
@@ -433,6 +459,11 @@ func (in *SensorStatus) DeepCopyInto(out *SensorStatus) {
}
}
in.LastCycleTime.DeepCopyInto(&out.LastCycleTime)
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(SensorResources)
+ (*in).DeepCopyInto(*out)
+ }
return
}
diff --git a/pkg/client/eventsources/clientset/versioned/clientset.go b/pkg/client/eventsources/clientset/versioned/clientset.go
new file mode 100644
index 0000000000..334969a3f4
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/clientset.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+package versioned
+
+import (
+ argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1"
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+)
+
+type Interface interface {
+ Discovery() discovery.DiscoveryInterface
+ ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface
+}
+
+// Clientset contains the clients for groups. Each group has exactly one
+// version included in a Clientset.
+type Clientset struct {
+ *discovery.DiscoveryClient
+ argoprojV1alpha1 *argoprojv1alpha1.ArgoprojV1alpha1Client
+}
+
+// ArgoprojV1alpha1 retrieves the ArgoprojV1alpha1Client
+func (c *Clientset) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface {
+ return c.argoprojV1alpha1
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ if c == nil {
+ return nil
+ }
+ return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+ var cs Clientset
+ var err error
+ cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ var cs Clientset
+ cs.argoprojV1alpha1 = argoprojv1alpha1.NewForConfigOrDie(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
+ return &cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.argoprojV1alpha1 = argoprojv1alpha1.New(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+ return &cs
+}
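
A hedged usage sketch for the generated clientset above, assuming an out-of-cluster kubeconfig; the kubeconfig path and namespace are placeholders, not values from this PR:

package main

import (
	"fmt"

	versioned "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// Build the typed EventSource clientset and list resources in a namespace.
	client, err := versioned.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	list, err := client.ArgoprojV1alpha1().EventSources("argo-events").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, es := range list.Items {
		fmt.Println(es.Name)
	}
}
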
diff --git a/pkg/client/eventsources/clientset/versioned/doc.go b/pkg/client/eventsources/clientset/versioned/doc.go
new file mode 100644
index 0000000000..4fe3ab760c
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated clientset.
+package versioned
diff --git a/pkg/client/eventsources/clientset/versioned/fake/clientset_generated.go b/pkg/client/eventsources/clientset/versioned/fake/clientset_generated.go
new file mode 100644
index 0000000000..2ea0313bdc
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/fake/clientset_generated.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ clientset "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned"
+ argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1"
+ fakeargoprojv1alpha1 "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/fake"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/discovery"
+ fakediscovery "k8s.io/client-go/discovery/fake"
+ "k8s.io/client-go/testing"
+)
+
+// NewSimpleClientset returns a clientset that will respond with the provided objects.
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
+// without applying any validations and/or defaults. It shouldn't be considered a replacement
+// for a real clientset and is mostly useful in simple unit tests.
+func NewSimpleClientset(objects ...runtime.Object) *Clientset {
+ o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
+ for _, obj := range objects {
+ if err := o.Add(obj); err != nil {
+ panic(err)
+ }
+ }
+
+ cs := &Clientset{tracker: o}
+ cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
+ cs.AddReactor("*", "*", testing.ObjectReaction(o))
+ cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
+ gvr := action.GetResource()
+ ns := action.GetNamespace()
+ watch, err := o.Watch(gvr, ns)
+ if err != nil {
+ return false, nil, err
+ }
+ return true, watch, nil
+ })
+
+ return cs
+}
+
+// Clientset implements clientset.Interface. Meant to be embedded into a
+// struct to get a default implementation. This makes faking out just the method
+// you want to test easier.
+type Clientset struct {
+ testing.Fake
+ discovery *fakediscovery.FakeDiscovery
+ tracker testing.ObjectTracker
+}
+
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ return c.discovery
+}
+
+func (c *Clientset) Tracker() testing.ObjectTracker {
+ return c.tracker
+}
+
+var _ clientset.Interface = &Clientset{}
+
+// ArgoprojV1alpha1 retrieves the ArgoprojV1alpha1Client
+func (c *Clientset) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface {
+ return &fakeargoprojv1alpha1.FakeArgoprojV1alpha1{Fake: &c.Fake}
+}
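
A sketch of the kind of unit test the fake clientset enables, assuming EventSource embeds the usual metav1.ObjectMeta; the object name and namespace are invented for illustration:

package fake_test

import (
	"testing"

	"github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
	"github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestGetEventSource(t *testing.T) {
	es := &v1alpha1.EventSource{
		ObjectMeta: metav1.ObjectMeta{Name: "webhook", Namespace: "argo-events"},
	}

	// Seed the object tracker, then read the object back through the typed client.
	client := fake.NewSimpleClientset(es)
	got, err := client.ArgoprojV1alpha1().EventSources("argo-events").Get("webhook", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "webhook" {
		t.Fatalf("unexpected name %q", got.Name)
	}
}
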
diff --git a/pkg/client/eventsources/clientset/versioned/fake/doc.go b/pkg/client/eventsources/clientset/versioned/fake/doc.go
new file mode 100644
index 0000000000..3695dbac63
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/fake/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated fake clientset.
+package fake
diff --git a/pkg/client/eventsources/clientset/versioned/fake/register.go b/pkg/client/eventsources/clientset/versioned/fake/register.go
new file mode 100644
index 0000000000..5eba7f7f46
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/fake/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var scheme = runtime.NewScheme()
+var codecs = serializer.NewCodecFactory(scheme)
+var parameterCodec = runtime.NewParameterCodec(scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ argoprojv1alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(scheme))
+}
diff --git a/pkg/client/eventsources/clientset/versioned/scheme/doc.go b/pkg/client/eventsources/clientset/versioned/scheme/doc.go
new file mode 100644
index 0000000000..3ca1386b63
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/scheme/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/pkg/client/eventsources/clientset/versioned/scheme/register.go b/pkg/client/eventsources/clientset/versioned/scheme/register.go
new file mode 100644
index 0000000000..3db4d70d1e
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/scheme/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ argoprojv1alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/doc.go b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/doc.go
new file mode 100644
index 0000000000..7f038ec4b0
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/eventsource.go b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/eventsource.go
new file mode 100644
index 0000000000..a6f5049dfc
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/eventsource.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "time"
+
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ scheme "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// EventSourcesGetter has a method to return a EventSourceInterface.
+// A group's client should implement this interface.
+type EventSourcesGetter interface {
+ EventSources(namespace string) EventSourceInterface
+}
+
+// EventSourceInterface has methods to work with EventSource resources.
+type EventSourceInterface interface {
+ Create(*v1alpha1.EventSource) (*v1alpha1.EventSource, error)
+ Update(*v1alpha1.EventSource) (*v1alpha1.EventSource, error)
+ UpdateStatus(*v1alpha1.EventSource) (*v1alpha1.EventSource, error)
+ Delete(name string, options *v1.DeleteOptions) error
+ DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+ Get(name string, options v1.GetOptions) (*v1alpha1.EventSource, error)
+ List(opts v1.ListOptions) (*v1alpha1.EventSourceList, error)
+ Watch(opts v1.ListOptions) (watch.Interface, error)
+ Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EventSource, err error)
+ EventSourceExpansion
+}
+
+// eventSources implements EventSourceInterface
+type eventSources struct {
+ client rest.Interface
+ ns string
+}
+
+// newEventSources returns a EventSources
+func newEventSources(c *ArgoprojV1alpha1Client, namespace string) *eventSources {
+ return &eventSources{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the eventSource, and returns the corresponding eventSource object, and an error if there is any.
+func (c *eventSources) Get(name string, options v1.GetOptions) (result *v1alpha1.EventSource, err error) {
+ result = &v1alpha1.EventSource{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventsources").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of EventSources that match those selectors.
+func (c *eventSources) List(opts v1.ListOptions) (result *v1alpha1.EventSourceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.EventSourceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventsources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do().
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested eventSources.
+func (c *eventSources) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("eventsources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch()
+}
+
+// Create takes the representation of a eventSource and creates it. Returns the server's representation of the eventSource, and an error, if there is any.
+func (c *eventSources) Create(eventSource *v1alpha1.EventSource) (result *v1alpha1.EventSource, err error) {
+ result = &v1alpha1.EventSource{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("eventsources").
+ Body(eventSource).
+ Do().
+ Into(result)
+ return
+}
+
+// Update takes the representation of a eventSource and updates it. Returns the server's representation of the eventSource, and an error, if there is any.
+func (c *eventSources) Update(eventSource *v1alpha1.EventSource) (result *v1alpha1.EventSource, err error) {
+ result = &v1alpha1.EventSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventsources").
+ Name(eventSource.Name).
+ Body(eventSource).
+ Do().
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *eventSources) UpdateStatus(eventSource *v1alpha1.EventSource) (result *v1alpha1.EventSource, err error) {
+ result = &v1alpha1.EventSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventsources").
+ Name(eventSource.Name).
+ SubResource("status").
+ Body(eventSource).
+ Do().
+ Into(result)
+ return
+}
+
+// Delete takes name of the eventSource and deletes it. Returns an error if one occurs.
+func (c *eventSources) Delete(name string, options *v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventsources").
+ Name(name).
+ Body(options).
+ Do().
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *eventSources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ var timeout time.Duration
+ if listOptions.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventsources").
+ VersionedParams(&listOptions, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Patch applies the patch and returns the patched eventSource.
+func (c *eventSources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EventSource, err error) {
+ result = &v1alpha1.EventSource{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("eventsources").
+ SubResource(subresources...).
+ Name(name).
+ Body(data).
+ Do().
+ Into(result)
+ return
+}
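
A short sketch (not generated code) of consuming the Watch method defined above; it assumes the caller already has a rest.Config and simply logs observed events:

package example

import (
	"fmt"

	"github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
	versioned "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

// watchEventSources blocks, printing every watch event seen for EventSource
// objects in the given namespace.
func watchEventSources(config *rest.Config, namespace string) error {
	client, err := versioned.NewForConfig(config)
	if err != nil {
		return err
	}
	w, err := client.ArgoprojV1alpha1().EventSources(namespace).Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		if es, ok := event.Object.(*v1alpha1.EventSource); ok {
			fmt.Println(event.Type, es.Name)
		}
	}
	return nil
}
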
diff --git a/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/eventsources_client.go b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/eventsources_client.go
new file mode 100644
index 0000000000..a9979b1a67
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/eventsources_client.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type ArgoprojV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ EventSourcesGetter
+}
+
+// ArgoprojV1alpha1Client is used to interact with features provided by the argoproj.io group.
+type ArgoprojV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *ArgoprojV1alpha1Client) EventSources(namespace string) EventSourceInterface {
+ return newEventSources(c, namespace)
+}
+
+// NewForConfig creates a new ArgoprojV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*ArgoprojV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &ArgoprojV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new ArgoprojV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *ArgoprojV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new ArgoprojV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *ArgoprojV1alpha1Client {
+ return &ArgoprojV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *ArgoprojV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/fake/doc.go b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/fake/doc.go
new file mode 100644
index 0000000000..eb8791ce4f
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/fake/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/fake/fake_eventsource.go b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/fake/fake_eventsource.go
new file mode 100644
index 0000000000..82b995586b
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/fake/fake_eventsource.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeEventSources implements EventSourceInterface
+type FakeEventSources struct {
+ Fake *FakeArgoprojV1alpha1
+ ns string
+}
+
+var eventsourcesResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "eventsources"}
+
+var eventsourcesKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "EventSource"}
+
+// Get takes name of the eventSource, and returns the corresponding eventSource object, and an error if there is any.
+func (c *FakeEventSources) Get(name string, options v1.GetOptions) (result *v1alpha1.EventSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(eventsourcesResource, c.ns, name), &v1alpha1.EventSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventSource), err
+}
+
+// List takes label and field selectors, and returns the list of EventSources that match those selectors.
+func (c *FakeEventSources) List(opts v1.ListOptions) (result *v1alpha1.EventSourceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(eventsourcesResource, eventsourcesKind, c.ns, opts), &v1alpha1.EventSourceList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.EventSourceList{ListMeta: obj.(*v1alpha1.EventSourceList).ListMeta}
+ for _, item := range obj.(*v1alpha1.EventSourceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested eventSources.
+func (c *FakeEventSources) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(eventsourcesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a eventSource and creates it. Returns the server's representation of the eventSource, and an error, if there is any.
+func (c *FakeEventSources) Create(eventSource *v1alpha1.EventSource) (result *v1alpha1.EventSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(eventsourcesResource, c.ns, eventSource), &v1alpha1.EventSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventSource), err
+}
+
+// Update takes the representation of a eventSource and updates it. Returns the server's representation of the eventSource, and an error, if there is any.
+func (c *FakeEventSources) Update(eventSource *v1alpha1.EventSource) (result *v1alpha1.EventSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(eventsourcesResource, c.ns, eventSource), &v1alpha1.EventSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventSource), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeEventSources) UpdateStatus(eventSource *v1alpha1.EventSource) (*v1alpha1.EventSource, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(eventsourcesResource, "status", c.ns, eventSource), &v1alpha1.EventSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventSource), err
+}
+
+// Delete takes name of the eventSource and deletes it. Returns an error if one occurs.
+func (c *FakeEventSources) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(eventsourcesResource, c.ns, name), &v1alpha1.EventSource{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeEventSources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(eventsourcesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.EventSourceList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched eventSource.
+func (c *FakeEventSources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EventSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(eventsourcesResource, c.ns, name, pt, data, subresources...), &v1alpha1.EventSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventSource), err
+}
diff --git a/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/fake/fake_eventsources_client.go b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/fake/fake_eventsources_client.go
new file mode 100644
index 0000000000..59140484c1
--- /dev/null
+++ b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/fake/fake_eventsources_client.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeArgoprojV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeArgoprojV1alpha1) EventSources(namespace string) v1alpha1.EventSourceInterface {
+ return &FakeEventSources{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeArgoprojV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/gateways/common/doc.go b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/generated_expansion.go
similarity index 77%
rename from gateways/common/doc.go
rename to pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/generated_expansion.go
index f333b2d6dd..a6f68bb95f 100644
--- a/gateways/common/doc.go
+++ b/pkg/client/eventsources/clientset/versioned/typed/eventsources/v1alpha1/generated_expansion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,6 +13,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
+// Code generated by client-gen. DO NOT EDIT.
-// Package common contains structs and methods that are shared across different gateways.
-package common
+package v1alpha1
+
+type EventSourceExpansion interface{}
diff --git a/pkg/client/eventsources/informers/externalversions/eventsources/interface.go b/pkg/client/eventsources/informers/externalversions/eventsources/interface.go
new file mode 100644
index 0000000000..6a4faf1ffb
--- /dev/null
+++ b/pkg/client/eventsources/informers/externalversions/eventsources/interface.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by informer-gen. DO NOT EDIT.
+
+package argoproj
+
+import (
+ v1alpha1 "github.com/argoproj/argo-events/pkg/client/eventsources/informers/externalversions/eventsources/v1alpha1"
+ internalinterfaces "github.com/argoproj/argo-events/pkg/client/eventsources/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+ // V1alpha1 provides access to shared informers for resources in V1alpha1.
+ V1alpha1() v1alpha1.Interface
+}
+
+type group struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1alpha1 returns a new v1alpha1.Interface.
+func (g *group) V1alpha1() v1alpha1.Interface {
+ return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/pkg/client/eventsources/informers/externalversions/eventsources/v1alpha1/eventsource.go b/pkg/client/eventsources/informers/externalversions/eventsources/v1alpha1/eventsource.go
new file mode 100644
index 0000000000..51250a5936
--- /dev/null
+++ b/pkg/client/eventsources/informers/externalversions/eventsources/v1alpha1/eventsource.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ time "time"
+
+ eventsourcesv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ versioned "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned"
+ internalinterfaces "github.com/argoproj/argo-events/pkg/client/eventsources/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/argoproj/argo-events/pkg/client/eventsources/listers/eventsources/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// EventSourceInformer provides access to a shared informer and lister for
+// EventSources.
+type EventSourceInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.EventSourceLister
+}
+
+type eventSourceInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewEventSourceInformer constructs a new informer for EventSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewEventSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredEventSourceInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredEventSourceInformer constructs a new informer for EventSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredEventSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ArgoprojV1alpha1().EventSources(namespace).List(options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ArgoprojV1alpha1().EventSources(namespace).Watch(options)
+ },
+ },
+ &eventsourcesv1alpha1.EventSource{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *eventSourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredEventSourceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *eventSourceInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&eventsourcesv1alpha1.EventSource{}, f.defaultInformer)
+}
+
+func (f *eventSourceInformer) Lister() v1alpha1.EventSourceLister {
+ return v1alpha1.NewEventSourceLister(f.Informer().GetIndexer())
+}
diff --git a/pkg/client/eventsources/informers/externalversions/eventsources/v1alpha1/interface.go b/pkg/client/eventsources/informers/externalversions/eventsources/v1alpha1/interface.go
new file mode 100644
index 0000000000..28e6a79888
--- /dev/null
+++ b/pkg/client/eventsources/informers/externalversions/eventsources/v1alpha1/interface.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ internalinterfaces "github.com/argoproj/argo-events/pkg/client/eventsources/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // EventSources returns a EventSourceInformer.
+ EventSources() EventSourceInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// EventSources returns a EventSourceInformer.
+func (v *version) EventSources() EventSourceInformer {
+ return &eventSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/pkg/client/eventsources/informers/externalversions/factory.go b/pkg/client/eventsources/informers/externalversions/factory.go
new file mode 100644
index 0000000000..e16a4f16b9
--- /dev/null
+++ b/pkg/client/eventsources/informers/externalversions/factory.go
@@ -0,0 +1,179 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+ reflect "reflect"
+ sync "sync"
+ time "time"
+
+ versioned "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned"
+ eventsources "github.com/argoproj/argo-events/pkg/client/eventsources/informers/externalversions/eventsources"
+ internalinterfaces "github.com/argoproj/argo-events/pkg/client/eventsources/informers/externalversions/internalinterfaces"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// SharedInformerOption defines the functional option type for SharedInformerFactory.
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
+
+type sharedInformerFactory struct {
+ client versioned.Interface
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ lock sync.Mutex
+ defaultResync time.Duration
+ customResync map[reflect.Type]time.Duration
+
+ informers map[reflect.Type]cache.SharedIndexInformer
+ // startedInformers is used for tracking which informers have been started.
+ // This allows Start() to be called multiple times safely.
+ startedInformers map[reflect.Type]bool
+}
+
+// WithCustomResyncConfig sets a custom resync period for the specified informer types.
+func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ for k, v := range resyncConfig {
+ factory.customResync[reflect.TypeOf(k)] = v
+ }
+ return factory
+ }
+}
+
+// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ factory.tweakListOptions = tweakListOptions
+ return factory
+ }
+}
+
+// WithNamespace limits the SharedInformerFactory to the specified namespace.
+func WithNamespace(namespace string) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ factory.namespace = namespace
+ return factory
+ }
+}
+
+// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
+func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
+ return NewSharedInformerFactoryWithOptions(client, defaultResync)
+}
+
+// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
+// Listers obtained via this SharedInformerFactory will be subject to the same filters
+// as specified here.
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
+func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
+ return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
+}
+
+// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+ factory := &sharedInformerFactory{
+ client: client,
+ namespace: v1.NamespaceAll,
+ defaultResync: defaultResync,
+ informers: make(map[reflect.Type]cache.SharedIndexInformer),
+ startedInformers: make(map[reflect.Type]bool),
+ customResync: make(map[reflect.Type]time.Duration),
+ }
+
+ // Apply all options
+ for _, opt := range options {
+ factory = opt(factory)
+ }
+
+ return factory
+}
+
+// Start initializes all requested informers.
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ for informerType, informer := range f.informers {
+ if !f.startedInformers[informerType] {
+ go informer.Run(stopCh)
+ f.startedInformers[informerType] = true
+ }
+ }
+}
+
+// WaitForCacheSync waits for all started informers' cache were synced.
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+ informers := func() map[reflect.Type]cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informers := map[reflect.Type]cache.SharedIndexInformer{}
+ for informerType, informer := range f.informers {
+ if f.startedInformers[informerType] {
+ informers[informerType] = informer
+ }
+ }
+ return informers
+ }()
+
+ res := map[reflect.Type]bool{}
+ for informType, informer := range informers {
+ res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+ }
+ return res
+}
+
+// InternalInformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informerType := reflect.TypeOf(obj)
+ informer, exists := f.informers[informerType]
+ if exists {
+ return informer
+ }
+
+ resyncPeriod, exists := f.customResync[informerType]
+ if !exists {
+ resyncPeriod = f.defaultResync
+ }
+
+ informer = newFunc(f.client, resyncPeriod)
+ f.informers[informerType] = informer
+
+ return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+type SharedInformerFactory interface {
+ internalinterfaces.SharedInformerFactory
+ ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+ WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+ Argoproj() eventsources.Interface
+}
+
+func (f *sharedInformerFactory) Argoproj() eventsources.Interface {
+ return eventsources.New(f, f.namespace, f.tweakListOptions)
+}
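
A minimal sketch of wiring the generated informer factory above into a controller-style loop; the resync period, namespace and handler body are illustrative assumptions:

package main

import (
	"time"

	versioned "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned"
	externalversions "github.com/argoproj/argo-events/pkg/client/eventsources/informers/externalversions"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := versioned.NewForConfigOrDie(config)

	stopCh := make(chan struct{})
	defer close(stopCh)

	// Shared informer factory scoped to a single namespace with a 30s resync.
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		client, 30*time.Second, externalversions.WithNamespace("argo-events"))

	informer := factory.Argoproj().V1alpha1().EventSources().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			// react to newly observed EventSource objects here
		},
	})

	// Start all requested informers and wait for their caches to sync.
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}
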
diff --git a/pkg/client/eventsources/informers/externalversions/generic.go b/pkg/client/eventsources/informers/externalversions/generic.go
new file mode 100644
index 0000000000..df803f8fbb
--- /dev/null
+++ b/pkg/client/eventsources/informers/externalversions/generic.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+ "fmt"
+
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is type of SharedIndexInformer which will locate and delegate to other
+// sharedInformers based on type
+type GenericInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+ informer cache.SharedIndexInformer
+ resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+ return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+ return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+ switch resource {
+ // Group=argoproj.io, Version=v1alpha1
+ case v1alpha1.SchemeGroupVersion.WithResource("eventsources"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Argoproj().V1alpha1().EventSources().Informer()}, nil
+
+ }
+
+ return nil, fmt.Errorf("no informer found for %v", resource)
+}
diff --git a/pkg/client/eventsources/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/eventsources/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 0000000000..652fe39056
--- /dev/null
+++ b/pkg/client/eventsources/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+ time "time"
+
+ versioned "github.com/argoproj/argo-events/pkg/client/eventsources/clientset/versioned"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle.
+type SharedInformerFactory interface {
+ Start(stopCh <-chan struct{})
+ InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/pkg/client/eventsources/listers/eventsources/v1alpha1/eventsource.go b/pkg/client/eventsources/listers/eventsources/v1alpha1/eventsource.go
new file mode 100644
index 0000000000..37b5fd3316
--- /dev/null
+++ b/pkg/client/eventsources/listers/eventsources/v1alpha1/eventsource.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsources/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// EventSourceLister helps list EventSources.
+type EventSourceLister interface {
+ // List lists all EventSources in the indexer.
+ List(selector labels.Selector) (ret []*v1alpha1.EventSource, err error)
+ // EventSources returns an object that can list and get EventSources.
+ EventSources(namespace string) EventSourceNamespaceLister
+ EventSourceListerExpansion
+}
+
+// eventSourceLister implements the EventSourceLister interface.
+type eventSourceLister struct {
+ indexer cache.Indexer
+}
+
+// NewEventSourceLister returns a new EventSourceLister.
+func NewEventSourceLister(indexer cache.Indexer) EventSourceLister {
+ return &eventSourceLister{indexer: indexer}
+}
+
+// List lists all EventSources in the indexer.
+func (s *eventSourceLister) List(selector labels.Selector) (ret []*v1alpha1.EventSource, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.EventSource))
+ })
+ return ret, err
+}
+
+// EventSources returns an object that can list and get EventSources.
+func (s *eventSourceLister) EventSources(namespace string) EventSourceNamespaceLister {
+ return eventSourceNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// EventSourceNamespaceLister helps list and get EventSources.
+type EventSourceNamespaceLister interface {
+ // List lists all EventSources in the indexer for a given namespace.
+ List(selector labels.Selector) (ret []*v1alpha1.EventSource, err error)
+ // Get retrieves the EventSource from the indexer for a given namespace and name.
+ Get(name string) (*v1alpha1.EventSource, error)
+ EventSourceNamespaceListerExpansion
+}
+
+// eventSourceNamespaceLister implements the EventSourceNamespaceLister
+// interface.
+type eventSourceNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all EventSources in the indexer for a given namespace.
+func (s eventSourceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.EventSource, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.EventSource))
+ })
+ return ret, err
+}
+
+// Get retrieves the EventSource from the indexer for a given namespace and name.
+func (s eventSourceNamespaceLister) Get(name string) (*v1alpha1.EventSource, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("eventsource"), name)
+ }
+ return obj.(*v1alpha1.EventSource), nil
+}
diff --git a/gateways/core/stream/mqtt/config_test.go b/pkg/client/eventsources/listers/eventsources/v1alpha1/expansion_generated.go
similarity index 56%
rename from gateways/core/stream/mqtt/config_test.go
rename to pkg/client/eventsources/listers/eventsources/v1alpha1/expansion_generated.go
index f4282092c0..b2400b06c7 100644
--- a/gateways/core/stream/mqtt/config_test.go
+++ b/pkg/client/eventsources/listers/eventsources/v1alpha1/expansion_generated.go
@@ -13,26 +13,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
+// Code generated by lister-gen. DO NOT EDIT.
-package mqtt
+package v1alpha1
-import (
- "github.com/smartystreets/goconvey/convey"
- "testing"
-)
+// EventSourceListerExpansion allows custom methods to be added to
+// EventSourceLister.
+type EventSourceListerExpansion interface{}
-var es = `
-url: tcp://mqtt.argo-events:1883
-topic: foo
-clientId: 1
-`
-
-func TestParseConfig(t *testing.T) {
- convey.Convey("Given a mqtt event source, parse it", t, func() {
- ps, err := parseEventSource(es)
- convey.So(err, convey.ShouldBeNil)
- convey.So(ps, convey.ShouldNotBeNil)
- _, ok := ps.(*mqtt)
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
+// EventSourceNamespaceListerExpansion allows custom methods to be added to
+// EventSourceNamespaceLister.
+type EventSourceNamespaceListerExpansion interface{}
diff --git a/pkg/client/gateway/clientset/versioned/clientset.go b/pkg/client/gateway/clientset/versioned/clientset.go
index c5bdecc855..6f1c0474a6 100644
--- a/pkg/client/gateway/clientset/versioned/clientset.go
+++ b/pkg/client/gateway/clientset/versioned/clientset.go
@@ -19,9 +19,9 @@ package versioned
import (
argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1"
- "k8s.io/client-go/discovery"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/util/flowcontrol"
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
)
type Interface interface {
diff --git a/pkg/client/gateway/clientset/versioned/fake/register.go b/pkg/client/gateway/clientset/versioned/fake/register.go
index 1c161557ca..482cd3ed6c 100644
--- a/pkg/client/gateway/clientset/versioned/fake/register.go
+++ b/pkg/client/gateway/clientset/versioned/fake/register.go
@@ -20,9 +20,9 @@ package fake
import (
argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
diff --git a/pkg/client/gateway/clientset/versioned/scheme/register.go b/pkg/client/gateway/clientset/versioned/scheme/register.go
index a898aa64a3..c00d416561 100644
--- a/pkg/client/gateway/clientset/versioned/scheme/register.go
+++ b/pkg/client/gateway/clientset/versioned/scheme/register.go
@@ -20,9 +20,9 @@ package scheme
import (
argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
diff --git a/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/fake/fake_gateway.go b/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/fake/fake_gateway.go
index d45336c1c4..1b40704496 100644
--- a/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/fake/fake_gateway.go
+++ b/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/fake/fake_gateway.go
@@ -18,13 +18,13 @@ limitations under the License.
package fake
import (
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/watch"
- "k8s.io/client-go/testing"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
)
// FakeGateways implements GatewayInterface
diff --git a/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/gateway.go b/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/gateway.go
index 8c865d51e8..0ab97cac9d 100644
--- a/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/gateway.go
+++ b/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/gateway.go
@@ -20,12 +20,12 @@ package v1alpha1
import (
"time"
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned/scheme"
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ scheme "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/watch"
- "k8s.io/client-go/rest"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
)
// GatewaysGetter has a method to return a GatewayInterface.
diff --git a/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/gateway_client.go b/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/gateway_client.go
index 0c12e48a37..46429675de 100644
--- a/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/gateway_client.go
+++ b/pkg/client/gateway/clientset/versioned/typed/gateway/v1alpha1/gateway_client.go
@@ -18,9 +18,9 @@ limitations under the License.
package v1alpha1
import (
- "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
"github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned/scheme"
- "k8s.io/client-go/rest"
+ rest "k8s.io/client-go/rest"
)
type ArgoprojV1alpha1Interface interface {
diff --git a/pkg/client/gateway/informers/externalversions/factory.go b/pkg/client/gateway/informers/externalversions/factory.go
index a934e6b03d..b1ab6e593a 100644
--- a/pkg/client/gateway/informers/externalversions/factory.go
+++ b/pkg/client/gateway/informers/externalversions/factory.go
@@ -18,17 +18,17 @@ limitations under the License.
package externalversions
import (
- "reflect"
- "sync"
- "time"
+ reflect "reflect"
+ sync "sync"
+ time "time"
- "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned"
+ versioned "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned"
gateway "github.com/argoproj/argo-events/pkg/client/gateway/informers/externalversions/gateway"
- "github.com/argoproj/argo-events/pkg/client/gateway/informers/externalversions/internalinterfaces"
+ internalinterfaces "github.com/argoproj/argo-events/pkg/client/gateway/informers/externalversions/internalinterfaces"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/client-go/tools/cache"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
diff --git a/pkg/client/gateway/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/gateway/informers/externalversions/internalinterfaces/factory_interfaces.go
index 4e31536877..75a4850299 100644
--- a/pkg/client/gateway/informers/externalversions/internalinterfaces/factory_interfaces.go
+++ b/pkg/client/gateway/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -18,12 +18,12 @@ limitations under the License.
package internalinterfaces
import (
- "time"
+ time "time"
- "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned"
+ versioned "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/tools/cache"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ cache "k8s.io/client-go/tools/cache"
)
// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
diff --git a/pkg/client/sensor/clientset/versioned/clientset.go b/pkg/client/sensor/clientset/versioned/clientset.go
index 4fb577b381..420432dda1 100644
--- a/pkg/client/sensor/clientset/versioned/clientset.go
+++ b/pkg/client/sensor/clientset/versioned/clientset.go
@@ -19,9 +19,9 @@ package versioned
import (
argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1"
- "k8s.io/client-go/discovery"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/util/flowcontrol"
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
)
type Interface interface {
diff --git a/pkg/client/sensor/clientset/versioned/fake/register.go b/pkg/client/sensor/clientset/versioned/fake/register.go
index 58e5e71371..57e50f01da 100644
--- a/pkg/client/sensor/clientset/versioned/fake/register.go
+++ b/pkg/client/sensor/clientset/versioned/fake/register.go
@@ -20,9 +20,9 @@ package fake
import (
argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
diff --git a/pkg/client/sensor/clientset/versioned/scheme/register.go b/pkg/client/sensor/clientset/versioned/scheme/register.go
index 9bbfa27750..f73b48c8c0 100644
--- a/pkg/client/sensor/clientset/versioned/scheme/register.go
+++ b/pkg/client/sensor/clientset/versioned/scheme/register.go
@@ -20,9 +20,9 @@ package scheme
import (
argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
diff --git a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor.go b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor.go
index 7f411564f4..00b05959d4 100644
--- a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor.go
+++ b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor.go
@@ -18,13 +18,13 @@ limitations under the License.
package fake
import (
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/watch"
- "k8s.io/client-go/testing"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
)
// FakeSensors implements SensorInterface
diff --git a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor.go b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor.go
index a350fe7d97..5ec1dd07a5 100644
--- a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor.go
+++ b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor.go
@@ -20,12 +20,12 @@ package v1alpha1
import (
"time"
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
- "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/scheme"
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
+ scheme "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/watch"
- "k8s.io/client-go/rest"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
)
// SensorsGetter has a method to return a SensorInterface.
diff --git a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor_client.go b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor_client.go
index 14a2132a08..72e79ea81f 100644
--- a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor_client.go
+++ b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor_client.go
@@ -18,9 +18,9 @@ limitations under the License.
package v1alpha1
import (
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
+ v1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
"github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/scheme"
- "k8s.io/client-go/rest"
+ rest "k8s.io/client-go/rest"
)
type ArgoprojV1alpha1Interface interface {
diff --git a/pkg/client/sensor/informers/externalversions/factory.go b/pkg/client/sensor/informers/externalversions/factory.go
index c96c58dbf0..32c44340f6 100644
--- a/pkg/client/sensor/informers/externalversions/factory.go
+++ b/pkg/client/sensor/informers/externalversions/factory.go
@@ -18,17 +18,17 @@ limitations under the License.
package externalversions
import (
- "reflect"
- "sync"
- "time"
+ reflect "reflect"
+ sync "sync"
+ time "time"
- "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned"
- "github.com/argoproj/argo-events/pkg/client/sensor/informers/externalversions/internalinterfaces"
+ versioned "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned"
+ internalinterfaces "github.com/argoproj/argo-events/pkg/client/sensor/informers/externalversions/internalinterfaces"
sensor "github.com/argoproj/argo-events/pkg/client/sensor/informers/externalversions/sensor"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/client-go/tools/cache"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
diff --git a/pkg/client/sensor/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/sensor/informers/externalversions/internalinterfaces/factory_interfaces.go
index 85a9b4dbcd..38a4b245eb 100644
--- a/pkg/client/sensor/informers/externalversions/internalinterfaces/factory_interfaces.go
+++ b/pkg/client/sensor/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -18,12 +18,12 @@ limitations under the License.
package internalinterfaces
import (
- "time"
+ time "time"
- "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned"
+ versioned "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/tools/cache"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ cache "k8s.io/client-go/tools/cache"
)
// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
diff --git a/sensors/cmd/client.go b/sensors/cmd/client.go
index 3cd6182049..027d55e30d 100644
--- a/sensors/cmd/client.go
+++ b/sensors/cmd/client.go
@@ -41,7 +41,7 @@ func main() {
if !ok {
panic("sensor namespace is not provided")
}
- controllerInstanceID, ok := os.LookupEnv(common.EnvVarSensorControllerInstanceID)
+ controllerInstanceID, ok := os.LookupEnv(common.EnvVarControllerInstanceID)
if !ok {
panic("sensor controller instance ID is not provided")
}
diff --git a/sensors/event-handler.go b/sensors/event-handler.go
index eb056c3492..f19d17ba8d 100644
--- a/sensors/event-handler.go
+++ b/sensors/event-handler.go
@@ -37,14 +37,14 @@ func (sec *sensorExecutionCtx) processUpdateNotification(ew *updateNotification)
defer func() {
// persist updates to sensor resource
labels := map[string]string{
- common.LabelSensorName: sec.sensor.Name,
- common.LabelSensorKeyPhase: string(sec.sensor.Status.Phase),
- common.LabelKeySensorControllerInstanceID: sec.controllerInstanceID,
- common.LabelOperation: "persist_state_update",
+ common.LabelSensorName: sec.sensor.Name,
+ sn.LabelPhase: string(sec.sensor.Status.Phase),
+ sn.LabelControllerInstanceID: sec.controllerInstanceID,
+ common.LabelOperation: "persist_state_update",
}
eventType := common.StateChangeEventType
- updatedSensor, err := sn.PersistUpdates(sec.sensorClient, sec.sensor, sec.controllerInstanceID, sec.log)
+ updatedSensor, err := sn.PersistUpdates(sec.sensorClient, sec.sensor, sec.log)
if err != nil {
sec.log.WithError(err).Error("failed to persist sensor update, escalating...")
// escalate failure
@@ -202,7 +202,7 @@ func (sec *sensorExecutionCtx) WatchEventsFromGateways() {
case pc.NATS:
sec.NatsEventProtocol()
var err error
- if sec.sensor, err = sn.PersistUpdates(sec.sensorClient, sec.sensor, sec.controllerInstanceID, sec.log); err != nil {
+ if sec.sensor, err = sn.PersistUpdates(sec.sensorClient, sec.sensor, sec.log); err != nil {
sec.log.WithError(err).Error("failed to persist sensor update")
labels := map[string]string{
common.LabelEventType: string(common.OperationFailureEventType),
diff --git a/sensors/event-handler_test.go b/sensors/event-handler_test.go
deleted file mode 100644
index 9d6b3db0e8..0000000000
--- a/sensors/event-handler_test.go
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package sensors
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "k8s.io/apimachinery/pkg/runtime"
- "net/http"
- "strings"
- "testing"
- "time"
-
- "github.com/argoproj/argo-events/common"
- sensor2 "github.com/argoproj/argo-events/controllers/sensor"
- apicommon "github.com/argoproj/argo-events/pkg/apis/common"
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
- sensorFake "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/fake"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/selection"
- dynamicFake "k8s.io/client-go/dynamic/fake"
- "k8s.io/client-go/kubernetes/fake"
-)
-
-var sensorStr = `
-apiVersion: argoproj.io/v1alpha1
-kind: Sensor
-metadata:
- name: test-sensor
- labels:
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
-spec:
- template:
- containers:
- - name: "sensor"
- image: "argoproj/sensor"
- imagePullPolicy: Always
- serviceAccountName: argo-events-sa
- dependencies:
- - name: "test-gateway:test"
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- triggers:
- - template:
- name: test-workflow-trigger
- group: argoproj.io
- version: v1alpha1
- resource: workflows
- source:
- inline: |
- apiVersion: argoproj.io/v1alpha1
- kind: Workflow
- metadata:
- generateName: hello-world-
- spec:
- entrypoint: whalesay
- templates:
- - name: whalesay
- container:
- args:
- - "hello world"
- command:
- - cowsay
- image: "docker/whalesay:latest"
-`
-
-var podResourceList = metav1.APIResourceList{
- GroupVersion: metav1.GroupVersion{Group: "", Version: "v1"}.String(),
- APIResources: []metav1.APIResource{
- {Kind: "Pod", Namespaced: true, Name: "pods", SingularName: "pod", Group: "", Version: "v1", Verbs: []string{"create", "get"}},
- },
-}
-
-func getSensor() (*v1alpha1.Sensor, error) {
- var sensor v1alpha1.Sensor
- err := yaml.Unmarshal([]byte(sensorStr), &sensor)
- return &sensor, err
-}
-
-type mockHttpWriter struct {
- Status int
- Payload []byte
-}
-
-func (m *mockHttpWriter) Header() http.Header {
- return http.Header{}
-}
-
-func (m *mockHttpWriter) Write(p []byte) (int, error) {
- m.Payload = p
- return 0, nil
-}
-
-func (m *mockHttpWriter) WriteHeader(statusCode int) {
- m.Status = statusCode
-}
-
-func getsensorExecutionCtx(sensor *v1alpha1.Sensor) *sensorExecutionCtx {
- kubeClientset := fake.NewSimpleClientset()
- fakeDynamicClient := dynamicFake.NewSimpleDynamicClient(&runtime.Scheme{})
- return &sensorExecutionCtx{
- kubeClient: kubeClientset,
- dynamicClient: fakeDynamicClient,
- log: common.NewArgoEventsLogger(),
- sensorClient: sensorFake.NewSimpleClientset(),
- sensor: sensor,
- controllerInstanceID: "test-1",
- queue: make(chan *updateNotification),
- }
-}
-
-func getCloudEvent() *apicommon.Event {
- return &apicommon.Event{
- Context: apicommon.EventContext{
- CloudEventsVersion: common.CloudEventsVersion,
- EventID: fmt.Sprintf("%x", "123"),
- ContentType: "application/json",
- EventTime: metav1.MicroTime{Time: time.Now().UTC()},
- EventType: "test",
- EventTypeVersion: common.CloudEventsVersion,
- Source: &apicommon.URI{
- Host: common.DefaultEventSourceName("test-gateway", "test"),
- },
- },
- Payload: []byte(`{
- "x": "abc"
- }`),
- }
-}
-
-func TestDependencyGlobMatch(t *testing.T) {
- sensor, err := getSensor()
- convey.Convey("Given a sensor spec, create a sensor", t, func() {
- globDepName := "test-gateway:*"
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor, convey.ShouldNotBeNil)
- sensor.Spec.Dependencies[0].Name = globDepName
- sec := getsensorExecutionCtx(sensor)
-
- sec.sensor, err = sec.sensorClient.ArgoprojV1alpha1().Sensors(sensor.Namespace).Create(sensor)
- convey.So(err, convey.ShouldBeNil)
-
- sec.sensor.Status.Nodes = make(map[string]v1alpha1.NodeStatus)
- fmt.Println(sensor.NodeID(globDepName))
-
- sensor2.InitializeNode(sec.sensor, globDepName, v1alpha1.NodeTypeEventDependency, sec.log, "node is init")
- sensor2.MarkNodePhase(sec.sensor, globDepName, v1alpha1.NodeTypeEventDependency, v1alpha1.NodePhaseActive, nil, sec.log, "node is active")
-
- sensor2.InitializeNode(sec.sensor, "test-workflow-trigger", v1alpha1.NodeTypeTrigger, sec.log, "trigger is init")
-
- e := &apicommon.Event{
- Payload: []byte("hello"),
- Context: apicommon.EventContext{
- Source: &apicommon.URI{
- Host: "test-gateway:test",
- },
- },
- }
- dataCh := make(chan *updateNotification)
- go func() {
- data := <-sec.queue
- dataCh <- data
- }()
- ok := sec.sendEventToInternalQueue(e, &mockHttpWriter{})
- convey.So(ok, convey.ShouldEqual, true)
- ew := <-dataCh
- sec.processUpdateNotification(ew)
- })
-}
-
-func TestEventHandler(t *testing.T) {
- sensor, err := getSensor()
- convey.Convey("Given a sensor spec, create a sensor", t, func() {
- convey.So(err, convey.ShouldBeNil)
- convey.So(sensor, convey.ShouldNotBeNil)
- sec := getsensorExecutionCtx(sensor)
-
- sec.sensor, err = sec.sensorClient.ArgoprojV1alpha1().Sensors(sensor.Namespace).Create(sensor)
- convey.So(err, convey.ShouldBeNil)
-
- sec.sensor.Status.Nodes = make(map[string]v1alpha1.NodeStatus)
- fmt.Println(sensor.NodeID("test-gateway:test"))
-
- sensor2.InitializeNode(sec.sensor, "test-gateway:test", v1alpha1.NodeTypeEventDependency, sec.log, "node is init")
- sensor2.MarkNodePhase(sec.sensor, "test-gateway:test", v1alpha1.NodeTypeEventDependency, v1alpha1.NodePhaseActive, nil, sec.log, "node is active")
-
- sensor2.InitializeNode(sec.sensor, "test-workflow-trigger", v1alpha1.NodeTypeTrigger, sec.log, "trigger is init")
-
- sec.processUpdateNotification(&updateNotification{
- event: getCloudEvent(),
- notificationType: v1alpha1.EventNotification,
- writer: &mockHttpWriter{},
- eventDependency: &v1alpha1.EventDependency{
- Name: "test-gateway:test",
- },
- })
-
- convey.Convey("Update sensor event dependencies", func() {
- sensor = sec.sensor.DeepCopy()
- sensor.Spec.Dependencies = append(sensor.Spec.Dependencies, v1alpha1.EventDependency{
- Name: "test-gateway:test2",
- })
- sec.processUpdateNotification(&updateNotification{
- event: nil,
- notificationType: v1alpha1.ResourceUpdateNotification,
- writer: &mockHttpWriter{},
- eventDependency: &v1alpha1.EventDependency{
- Name: "test-gateway:test2",
- },
- sensor: sensor,
- })
- convey.So(len(sec.sensor.Status.Nodes), convey.ShouldEqual, 3)
- })
-
- })
-}
-
-func TestDeleteStaleStatusNodes(t *testing.T) {
- convey.Convey("Given a sensor, delete the stale status nodes", t, func() {
- sensor, err := getSensor()
- convey.So(err, convey.ShouldBeNil)
- sec := getsensorExecutionCtx(sensor)
- nodeId1 := sensor.NodeID("test-gateway:test")
- nodeId2 := sensor.NodeID("test-gateway:test2")
- sec.sensor.Status.Nodes = map[string]v1alpha1.NodeStatus{
- nodeId1: v1alpha1.NodeStatus{
- Type: v1alpha1.NodeTypeEventDependency,
- Name: "test-gateway:test",
- Phase: v1alpha1.NodePhaseActive,
- ID: "1234",
- },
- nodeId2: v1alpha1.NodeStatus{
- Type: v1alpha1.NodeTypeEventDependency,
- Name: "test-gateway:test2",
- Phase: v1alpha1.NodePhaseActive,
- ID: "2345",
- },
- }
-
- _, ok := sec.sensor.Status.Nodes[nodeId1]
- convey.So(ok, convey.ShouldEqual, true)
- _, ok = sec.sensor.Status.Nodes[nodeId2]
- convey.So(ok, convey.ShouldEqual, true)
-
- sec.deleteStaleStatusNodes()
- convey.So(len(sec.sensor.Status.Nodes), convey.ShouldEqual, 1)
- _, ok = sec.sensor.Status.Nodes[nodeId1]
- convey.So(ok, convey.ShouldEqual, true)
- _, ok = sec.sensor.Status.Nodes[nodeId2]
- convey.So(ok, convey.ShouldEqual, false)
- })
-}
-
-func TestValidateEvent(t *testing.T) {
- convey.Convey("Given an event, validate it", t, func() {
- s, _ := getSensor()
- sec := getsensorExecutionCtx(s)
- dep, valid := sec.validateEvent(&apicommon.Event{
- Context: apicommon.EventContext{
- Source: &apicommon.URI{
- Host: "test-gateway:test",
- },
- },
- })
- convey.So(valid, convey.ShouldEqual, true)
- convey.So(dep, convey.ShouldNotBeNil)
- })
-}
-
-func TestParseEvent(t *testing.T) {
- convey.Convey("Given an event payload, parse event", t, func() {
- s, _ := getSensor()
- sec := getsensorExecutionCtx(s)
- e := &apicommon.Event{
- Payload: []byte("hello"),
- Context: apicommon.EventContext{
- Source: &apicommon.URI{
- Host: "test-gateway:test",
- },
- },
- }
- payload, err := json.Marshal(e)
- convey.So(err, convey.ShouldBeNil)
-
- event, err := sec.parseEvent(payload)
- convey.So(err, convey.ShouldBeNil)
- convey.So(string(event.Payload), convey.ShouldEqual, "hello")
- })
-}
-
-func TestSendToInternalQueue(t *testing.T) {
- convey.Convey("Given an event, send it on internal queue", t, func() {
- s, _ := getSensor()
- sec := getsensorExecutionCtx(s)
- e := &apicommon.Event{
- Payload: []byte("hello"),
- Context: apicommon.EventContext{
- Source: &apicommon.URI{
- Host: "test-gateway:test",
- },
- },
- }
- go func() {
- <-sec.queue
- }()
- ok := sec.sendEventToInternalQueue(e, &mockHttpWriter{})
- convey.So(ok, convey.ShouldEqual, true)
- })
-}
-
-func TestHandleHttpEventHandler(t *testing.T) {
- convey.Convey("Test http handler", t, func() {
- s, _ := getSensor()
- sec := getsensorExecutionCtx(s)
- e := &apicommon.Event{
- Payload: []byte("hello"),
- Context: apicommon.EventContext{
- Source: &apicommon.URI{
- Host: "test-gateway:test",
- },
- },
- }
- go func() {
- <-sec.queue
- }()
- payload, err := json.Marshal(e)
- convey.So(err, convey.ShouldBeNil)
- writer := &mockHttpWriter{}
- sec.httpEventHandler(writer, &http.Request{
- Body: ioutil.NopCloser(bytes.NewReader(payload)),
- })
- convey.So(writer.Status, convey.ShouldEqual, http.StatusOK)
- })
-}
-
-func TestSuccessNatsConnection(t *testing.T) {
- convey.Convey("Given a successful nats connection, generate K8s event", t, func() {
- s, _ := getSensor()
- sec := getsensorExecutionCtx(s)
- sec.successNatsConnection()
- req1, err := labels.NewRequirement(common.LabelOperation, selection.Equals, []string{"nats_connection_setup"})
- convey.So(err, convey.ShouldBeNil)
- req2, err := labels.NewRequirement(common.LabelEventType, selection.Equals, []string{string(common.OperationSuccessEventType)})
- convey.So(err, convey.ShouldBeNil)
- req3, err := labels.NewRequirement(common.LabelSensorName, selection.Equals, []string{string(sec.sensor.Name)})
- convey.So(err, convey.ShouldBeNil)
-
- eventList, err := sec.kubeClient.CoreV1().Events(sec.sensor.Namespace).List(metav1.ListOptions{
- LabelSelector: labels.NewSelector().Add([]labels.Requirement{*req1, *req2, *req3}...).String(),
- })
- convey.So(err, convey.ShouldBeNil)
- convey.So(len(eventList.Items), convey.ShouldEqual, 1)
- event := eventList.Items[0]
- convey.So(event.Reason, convey.ShouldEqual, "connection setup successfully")
- })
-}
-
-func TestEscalateNatsConnectionFailure(t *testing.T) {
- convey.Convey("Given a failed nats connection, escalate through K8s event", t, func() {
- s, _ := getSensor()
- sec := getsensorExecutionCtx(s)
- sec.escalateNatsConnectionFailure()
- req1, err := labels.NewRequirement(common.LabelOperation, selection.Equals, []string{"nats_connection_setup"})
- convey.So(err, convey.ShouldBeNil)
- req2, err := labels.NewRequirement(common.LabelEventType, selection.Equals, []string{string(common.OperationFailureEventType)})
- convey.So(err, convey.ShouldBeNil)
- req3, err := labels.NewRequirement(common.LabelSensorName, selection.Equals, []string{string(sec.sensor.Name)})
- convey.So(err, convey.ShouldBeNil)
-
- eventList, err := sec.kubeClient.CoreV1().Events(sec.sensor.Namespace).List(metav1.ListOptions{
- LabelSelector: labels.NewSelector().Add([]labels.Requirement{*req1, *req2, *req3}...).String(),
- })
- convey.So(err, convey.ShouldBeNil)
- convey.So(len(eventList.Items), convey.ShouldEqual, 1)
- event := eventList.Items[0]
- convey.So(event.Reason, convey.ShouldEqual, "connection setup failed")
- })
-}
-
-func TestSuccessNatsSubscription(t *testing.T) {
- convey.Convey("Given a successful nats subscription, generate K8s event", t, func() {
- s, _ := getSensor()
- eventSource := "fake"
- sec := getsensorExecutionCtx(s)
- sec.successNatsSubscription(eventSource)
- req1, err := labels.NewRequirement(common.LabelOperation, selection.Equals, []string{"nats_subscription_success"})
- convey.So(err, convey.ShouldBeNil)
- req2, err := labels.NewRequirement(common.LabelEventType, selection.Equals, []string{string(common.OperationSuccessEventType)})
- convey.So(err, convey.ShouldBeNil)
- req3, err := labels.NewRequirement(common.LabelSensorName, selection.Equals, []string{string(sec.sensor.Name)})
- convey.So(err, convey.ShouldBeNil)
- req4, err := labels.NewRequirement(common.LabelEventSource, selection.Equals, []string{strings.Replace(eventSource, ":", "_", -1)})
- convey.So(err, convey.ShouldBeNil)
-
- eventList, err := sec.kubeClient.CoreV1().Events(sec.sensor.Namespace).List(metav1.ListOptions{
- LabelSelector: labels.NewSelector().Add([]labels.Requirement{*req1, *req2, *req3, *req4}...).String(),
- })
- convey.So(err, convey.ShouldBeNil)
- convey.So(len(eventList.Items), convey.ShouldEqual, 1)
- event := eventList.Items[0]
- convey.So(event.Reason, convey.ShouldEqual, "nats subscription success")
- })
-}
-
-func TestEscalateNatsSubscriptionFailure(t *testing.T) {
- convey.Convey("Given a failed nats subscription, escalate K8s event", t, func() {
- s, _ := getSensor()
- eventSource := "fake"
- sec := getsensorExecutionCtx(s)
- sec.escalateNatsSubscriptionFailure(eventSource)
- req1, err := labels.NewRequirement(common.LabelOperation, selection.Equals, []string{"nats_subscription_failure"})
- convey.So(err, convey.ShouldBeNil)
- req2, err := labels.NewRequirement(common.LabelEventType, selection.Equals, []string{string(common.OperationFailureEventType)})
- convey.So(err, convey.ShouldBeNil)
- req3, err := labels.NewRequirement(common.LabelSensorName, selection.Equals, []string{string(sec.sensor.Name)})
- convey.So(err, convey.ShouldBeNil)
- req4, err := labels.NewRequirement(common.LabelEventSource, selection.Equals, []string{strings.Replace(eventSource, ":", "_", -1)})
- convey.So(err, convey.ShouldBeNil)
-
- eventList, err := sec.kubeClient.CoreV1().Events(sec.sensor.Namespace).List(metav1.ListOptions{
- LabelSelector: labels.NewSelector().Add([]labels.Requirement{*req1, *req2, *req3, *req4}...).String(),
- })
- convey.So(err, convey.ShouldBeNil)
- convey.So(len(eventList.Items), convey.ShouldEqual, 1)
- event := eventList.Items[0]
- convey.So(event.Reason, convey.ShouldEqual, "nats subscription failed")
- })
-}
-
-func TestProcessNatsMessage(t *testing.T) {
- convey.Convey("Given nats message, process it", t, func() {
- s, _ := getSensor()
- sec := getsensorExecutionCtx(s)
- e := &apicommon.Event{
- Payload: []byte("hello"),
- Context: apicommon.EventContext{
- Source: &apicommon.URI{
- Host: "test-gateway:test",
- },
- },
- }
- dataCh := make(chan []byte)
- go func() {
- data := <-sec.queue
- dataCh <- data.event.Payload
- }()
- payload, err := json.Marshal(e)
- convey.So(err, convey.ShouldBeNil)
- sec.processNatsMessage(payload, "fake")
- data := <-dataCh
- convey.So(data, convey.ShouldNotBeNil)
- convey.So(string(data), convey.ShouldEqual, "hello")
- })
-}
diff --git a/sensors/signal-filter_test.go b/sensors/signal-filter_test.go
deleted file mode 100644
index aae0e2d64a..0000000000
--- a/sensors/signal-filter_test.go
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package sensors
-
-import (
- "reflect"
- "testing"
- "time"
-
- "github.com/argoproj/argo-events/common"
- apicommon "github.com/argoproj/argo-events/pkg/apis/common"
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
- "github.com/stretchr/testify/assert"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func Test_filterTime(t *testing.T) {
- timeFilter := &v1alpha1.TimeFilter{
- Stop: "17:14:00",
- Start: "10:11:00",
- }
- event := getCloudEvent()
-
- currentT := time.Now().UTC()
- currentT = time.Date(currentT.Year(), currentT.Month(), currentT.Day(), 0, 0, 0, 0, time.UTC)
- currentTStr := currentT.Format(common.StandardYYYYMMDDFormat)
- parsedTime, err := time.Parse(common.StandardTimeFormat, currentTStr+" 16:36:34")
- assert.Nil(t, err)
- event.Context.EventTime = metav1.MicroTime{
- Time: parsedTime,
- }
- sensor, err := getSensor()
- assert.Nil(t, err)
- sOptCtx := getsensorExecutionCtx(sensor)
- valid, err := sOptCtx.filterTime(timeFilter, &event.Context.EventTime)
- assert.Nil(t, err)
- assert.Equal(t, true, valid)
-
- // test invalid event
- timeFilter.Start = "09:09:09"
- timeFilter.Stop = "09:10:09"
- valid, err = sOptCtx.filterTime(timeFilter, &event.Context.EventTime)
- assert.Nil(t, err)
- assert.Equal(t, false, valid)
-
- // test no stop
- timeFilter.Start = "09:09:09"
- timeFilter.Stop = ""
- valid, err = sOptCtx.filterTime(timeFilter, &event.Context.EventTime)
- assert.Nil(t, err)
- assert.Equal(t, true, valid)
-
- // test no start
- timeFilter.Start = ""
- timeFilter.Stop = "17:09:09"
- valid, err = sOptCtx.filterTime(timeFilter, &event.Context.EventTime)
- assert.Nil(t, err)
- assert.Equal(t, true, valid)
-}
-
-func Test_filterContext(t *testing.T) {
- event := getCloudEvent()
- assert.NotNil(t, event)
- sensor, err := getSensor()
- assert.Nil(t, err)
- sOptCtx := getsensorExecutionCtx(sensor)
- assert.NotNil(t, sOptCtx)
- testCtx := event.Context.DeepCopy()
- valid := sOptCtx.filterContext(testCtx, &event.Context)
- assert.Equal(t, true, valid)
- testCtx.Source.Host = "dummy source"
- valid = sOptCtx.filterContext(testCtx, &event.Context)
- assert.Equal(t, false, valid)
-}
-
-func Test_filterData(t *testing.T) {
- type args struct {
- data []v1alpha1.DataFilter
- event *apicommon.Event
- }
- tests := []struct {
- name string
- args args
- want bool
- wantErr bool
- }{
- {
- name: "nil event",
- args: args{data: nil, event: nil},
- want: true,
- wantErr: false,
- },
- {
- name: "unsupported content type",
- args: args{data: nil, event: &apicommon.Event{Payload: []byte("a")}},
- want: true,
- wantErr: false,
- },
- {
- name: "empty data",
- args: args{data: nil, event: &apicommon.Event{
- Context: apicommon.EventContext{
- ContentType: "application/json",
- },
- }},
- want: true,
- wantErr: false,
- },
- {
- name: "nil filters, JSON data",
- args: args{data: nil, event: &apicommon.Event{
- Context: apicommon.EventContext{
- ContentType: "application/json",
- },
- Payload: []byte("{\"k\": \"v\"}"),
- }},
- want: true,
- wantErr: false,
- },
- {
- name: "string filter, JSON data",
- args: args{
- data: []v1alpha1.DataFilter{
- {
- Path: "k",
- Type: v1alpha1.JSONTypeString,
- Value: []string{"v"},
- },
- },
- event: &apicommon.Event{
- Context: apicommon.EventContext{
- ContentType: "application/json",
- },
- Payload: []byte("{\"k\": \"v\"}"),
- },
- },
- want: true,
- wantErr: false,
- },
- {
- name: "number filter, JSON data",
- args: args{data: []v1alpha1.DataFilter{
- {
- Path: "k",
- Type: v1alpha1.JSONTypeNumber,
- Value: []string{"1.0"},
- },
- },
- event: &apicommon.Event{
- Context: apicommon.EventContext{
- ContentType: "application/json",
- },
- Payload: []byte("{\"k\": \"1.0\"}"),
- }},
- want: true,
- wantErr: false,
- },
- {
- name: "multiple filters, nested JSON data",
- args: args{
- data: []v1alpha1.DataFilter{
- {
- Path: "k",
- Type: v1alpha1.JSONTypeString,
- Value: []string{"v"},
- },
- {
- Path: "k1.k",
- Type: v1alpha1.JSONTypeNumber,
- Value: []string{"3.14"},
- },
- {
- Path: "k1.k2",
- Type: v1alpha1.JSONTypeString,
- Value: []string{"hello,world", "hello there"},
- },
- },
- event: &apicommon.Event{
- Context: apicommon.EventContext{
- ContentType: "application/json",
- },
- Payload: []byte("{\"k\": true, \"k1\": {\"k\": 3.14, \"k2\": \"hello, world\"}}"),
- }},
- want: false,
- wantErr: false,
- },
- }
- sensor, err := getSensor()
- assert.Nil(t, err)
- sOptCtx := getsensorExecutionCtx(sensor)
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := sOptCtx.filterData(tt.args.data, tt.args.event)
- if (err != nil) != tt.wantErr {
- t.Errorf("filterData() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if got != tt.want {
- t.Errorf("filterData() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func Test_mapIsSubset(t *testing.T) {
- type args struct {
- sub map[string]string
- m map[string]string
- }
- tests := []struct {
- name string
- args args
- want bool
- }{
- {
- name: "nil sub, nil map",
- args: args{sub: nil, m: nil},
- want: true,
- },
- {
- name: "empty sub, empty map",
- args: args{sub: make(map[string]string), m: make(map[string]string)},
- want: true,
- },
- {
- name: "empty sub, non-empty map",
- args: args{sub: make(map[string]string), m: map[string]string{"k": "v"}},
- want: true,
- },
- {
- name: "disjoint",
- args: args{sub: map[string]string{"k1": "v1"}, m: map[string]string{"k": "v"}},
- want: false,
- },
- {
- name: "subset",
- args: args{sub: map[string]string{"k1": "v1"}, m: map[string]string{"k": "v", "k1": "v1"}},
- want: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := mapIsSubset(tt.args.sub, tt.args.m); got != tt.want {
- t.Errorf("mapIsSubset() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-// this test is meant to cover the missing cases for those not covered in eventDependency-filter_test.go and trigger-params_test.go
-func Test_renderEventDataAsJSON(t *testing.T) {
- type args struct {
- e *apicommon.Event
- }
- tests := []struct {
- name string
- args args
- want []byte
- wantErr bool
- }{
- {
- name: "nil event",
- args: args{e: nil},
- want: nil,
- wantErr: true,
- },
- {
- name: "missing content type",
- args: args{e: &apicommon.Event{}},
- want: nil,
- wantErr: true,
- },
- {
- name: "valid yaml content",
- args: args{e: &apicommon.Event{
- Context: apicommon.EventContext{
- ContentType: MediaTypeYAML,
- },
- Payload: []byte(`apiVersion: v1alpha1`),
- }},
- want: []byte(`{"apiVersion":"v1alpha1"}`),
- wantErr: false,
- },
- {
- name: "json content marked as yaml",
- args: args{e: &apicommon.Event{
- Context: apicommon.EventContext{
- ContentType: MediaTypeYAML,
- },
- Payload: []byte(`{"apiVersion":5}`),
- }},
- want: []byte(`{"apiVersion":5}`),
- wantErr: false,
- },
- {
- name: "invalid json content",
- args: args{e: &apicommon.Event{
- Context: apicommon.EventContext{
- ContentType: MediaTypeJSON,
- },
- Payload: []byte(`{5:"numberkey"}`),
- }},
- want: nil,
- wantErr: true,
- },
- {
- name: "invalid yaml content",
- args: args{e: &apicommon.Event{
- Context: apicommon.EventContext{
- ContentType: MediaTypeYAML,
- },
- Payload: []byte(`%\x786`),
- }},
- want: nil,
- wantErr: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := renderEventDataAsJSON(tt.args.e)
- if (err != nil) != tt.wantErr {
- t.Errorf("renderEventDataAsJSON() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("renderEventDataAsJSON() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/sensors/trigger-params_test.go b/sensors/trigger-params_test.go
deleted file mode 100644
index 36e70a95fb..0000000000
--- a/sensors/trigger-params_test.go
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package sensors
-
-import (
- "reflect"
- "testing"
-
- "fmt"
-
- apicommon "github.com/argoproj/argo-events/pkg/apis/common"
- "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
-)
-
-func Test_applyParams(t *testing.T) {
- defaultValue := "default"
- events := map[string]apicommon.Event{
- "simpleJSON": {
- Context: apicommon.EventContext{
- ContentType: MediaTypeJSON,
- },
- Payload: []byte(`{"name":{"first":"matt","last":"magaldi"},"age":24}`),
- },
- "nonJSON": {
- Context: apicommon.EventContext{
- ContentType: MediaTypeJSON,
- },
- Payload: []byte(`apiVersion: v1alpha1`),
- },
- }
- type args struct {
- jsonObj []byte
- params []v1alpha1.TriggerParameter
- events map[string]apicommon.Event
- }
- tests := []struct {
- name string
- args args
- want []byte
- wantErr bool
- }{
- {
- name: "no event and missing default -> error",
- args: args{
- jsonObj: []byte(""),
- params: []v1alpha1.TriggerParameter{
- {
- Src: &v1alpha1.TriggerParameterSource{
- Event: "missing",
- },
- },
- },
- events: events,
- },
- want: nil,
- wantErr: true,
- },
- {
- name: "no event with default -> success",
- args: args{
- jsonObj: []byte(""),
- params: []v1alpha1.TriggerParameter{
- {
- Src: &v1alpha1.TriggerParameterSource{
- Event: "missing",
- Value: &defaultValue,
- },
- Dest: "x",
- },
- },
- events: events,
- },
- want: []byte(`{"x":"default"}`),
- wantErr: false,
- },
- {
- name: "no event with default, but missing dest -> error",
- args: args{
- jsonObj: []byte(""),
- params: []v1alpha1.TriggerParameter{
- {
- Src: &v1alpha1.TriggerParameterSource{
- Event: "missing",
- Value: &defaultValue,
- },
- },
- },
- events: events,
- },
- want: nil,
- wantErr: true,
- },
- {
- name: "simpleJSON (new field) -> success",
- args: args{
- jsonObj: []byte(``),
- params: []v1alpha1.TriggerParameter{
- {
- Src: &v1alpha1.TriggerParameterSource{
- Event: "simpleJSON",
- Path: "name.last",
- },
- Dest: "x",
- },
- },
- events: events,
- },
- want: []byte(`{"x":"magaldi"}`),
- wantErr: false,
- },
- {
- name: "simpleJSON (updated field) -> success",
- args: args{
- jsonObj: []byte(`{"x":"before"}`),
- params: []v1alpha1.TriggerParameter{
- {
- Src: &v1alpha1.TriggerParameterSource{
- Event: "simpleJSON",
- Path: "name.last",
- },
- Dest: "x",
- },
- },
- events: events,
- },
- want: []byte(`{"x":"magaldi"}`),
- wantErr: false,
- },
- {
- name: "simpleJSON (prepended field) -> success",
- args: args{
- jsonObj: []byte(`{"x":"before"}`),
- params: []v1alpha1.TriggerParameter{
- {
- Src: &v1alpha1.TriggerParameterSource{
- Event: "simpleJSON",
- Path: "name.last",
- },
- Dest: "x",
- Operation: v1alpha1.TriggerParameterOpPrepend,
- },
- },
- events: events,
- },
- want: []byte(`{"x":"magaldibefore"}`),
- wantErr: false,
- },
- {
- name: "simpleJSON (appended field) -> success",
- args: args{
- jsonObj: []byte(`{"x":"before"}`),
- params: []v1alpha1.TriggerParameter{
- {
- Src: &v1alpha1.TriggerParameterSource{
- Event: "simpleJSON",
- Path: "name.last",
- },
- Dest: "x",
- Operation: v1alpha1.TriggerParameterOpAppend,
- },
- },
- events: events,
- },
- want: []byte(`{"x":"beforemagaldi"}`),
- wantErr: false,
- },
- {
- name: "non JSON, no default -> pass payload bytes without converting",
- args: args{
- jsonObj: []byte(``),
- params: []v1alpha1.TriggerParameter{
- {
- Src: &v1alpha1.TriggerParameterSource{
- Event: "nonJSON",
- },
- Dest: "x",
- },
- },
- events: events,
- },
- want: []byte(fmt.Sprintf(`{"x":"%s"}`, string(events["nonJSON"].Payload))),
- wantErr: false,
- },
- {
- name: "non JSON, with path -> error",
- args: args{
- jsonObj: []byte(``),
- params: []v1alpha1.TriggerParameter{
- {
- Src: &v1alpha1.TriggerParameterSource{
- Event: "nonJSON",
- Path: "test",
- },
- Dest: "x",
- },
- },
- events: events,
- },
- want: nil,
- wantErr: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := applyParams(tt.args.jsonObj, tt.args.params, tt.args.events)
- if (err != nil) != tt.wantErr {
- t.Errorf("applyParams() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("applyParams() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/sensors/trigger.go b/sensors/trigger.go
index 9c7904e569..88c797ff06 100644
--- a/sensors/trigger.go
+++ b/sensors/trigger.go
@@ -19,7 +19,6 @@ package sensors
import (
"encoding/json"
"fmt"
-
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/Knetic/govaluate"
@@ -46,11 +45,7 @@ func (sec *sensorExecutionCtx) canProcessTriggers() (bool, error) {
group:
for _, group := range sec.sensor.Spec.DependencyGroups {
for _, dependency := range group.Dependencies {
- nodeStatus := sn.GetNodeByName(sec.sensor, dependency)
- if nodeStatus == nil {
- return false, fmt.Errorf("failed to get a dependency: %+v", dependency)
- }
- if nodeStatus.Phase != v1alpha1.NodePhaseComplete {
+			if nodeStatus := sn.GetNodeByName(sec.sensor, dependency); nodeStatus == nil || nodeStatus.Phase != v1alpha1.NodePhaseComplete {
groups[group.Name] = false
continue group
}
@@ -253,7 +248,7 @@ func (sec *sensorExecutionCtx) applyTriggerPolicy(trigger *v1alpha1.Trigger, res
Factor: trigger.Policy.Backoff.Factor,
Jitter: trigger.Policy.Backoff.Jitter,
}, func() (bool, error) {
- obj, err := resourceInterface.Get(name, metav1.GetOptions{})
+ obj, err := resourceInterface.Namespace(namespace).Get(name, metav1.GetOptions{})
if err != nil {
sec.log.WithError(err).WithField("resource-name", obj.GetName()).Error("failed to get triggered resource")
return false, nil
@@ -332,7 +327,7 @@ func (sec *sensorExecutionCtx) createResourceObject(trigger *v1alpha1.Trigger, o
Resource: trigger.Template.Resource,
})
- liveObj, err := dynamicResInterface.Namespace(obj.GetNamespace()).Create(obj, metav1.CreateOptions{})
+ liveObj, err := dynamicResInterface.Namespace(namespace).Create(obj, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create resource object. err: %+v", err)
}
diff --git a/sensors/trigger_test.go b/sensors/trigger_test.go
deleted file mode 100644
index e262e638b5..0000000000
--- a/sensors/trigger_test.go
+++ /dev/null
@@ -1,457 +0,0 @@
-/*
-Copyright 2018 BlackRock, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package sensors
-
-//
-//import (
-// "encoding/json"
-// "testing"
-//
-// apicommon "github.com/argoproj/argo-events/pkg/apis/common"
-// "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
-// "github.com/smartystreets/goconvey/convey"
-// corev1 "k8s.io/api/core/v1"
-// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-// "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-// "k8s.io/apimachinery/pkg/labels"
-// "k8s.io/apimachinery/pkg/runtime"
-// "k8s.io/apimachinery/pkg/runtime/schema"
-// "k8s.io/apimachinery/pkg/types"
-// "k8s.io/apimachinery/pkg/watch"
-// "k8s.io/client-go/dynamic"
-// dynamicfake "k8s.io/client-go/dynamic/fake"
-// "k8s.io/client-go/kubernetes/fake"
-// kTesting "k8s.io/client-go/testing"
-// "k8s.io/client-go/util/flowcontrol"
-//)
-//
-//var successLabels = map[string]string{
-// "success-label": "fake",
-//}
-//
-//var failureLabels = map[string]string{
-// "failure-label": "fake",
-//}
-//
-//var podTemplate = &corev1.Pod{
-// TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
-// Spec: corev1.PodSpec{
-// Containers: []corev1.Container{
-// {
-// Name: "test1",
-// Image: "docker/whalesay",
-// },
-// },
-// },
-//}
-//
-//var triggerTemplate = v1alpha1.Trigger{
-// Template: &v1alpha1.TriggerTemplate{
-// GroupVersionResource: &metav1.GroupVersionResource{
-// Resource: "pods",
-// Version: "v1",
-// },
-// },
-//}
-//
-//func getUnstructured(res interface{}) (*unstructured.Unstructured, error) {
-// obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(res)
-// if err != nil {
-// return nil, err
-// }
-// return &unstructured.Unstructured{Object: obj}, nil
-//}
-//
-//func TestProcessTrigger(t *testing.T) {
-// convey.Convey("Given a sensor", t, func() {
-// trigger := *triggerTemplate.DeepCopy()
-// trigger.Template.Name = "testTrigger"
-// pod := podTemplate.DeepCopy()
-// pod.Name = "testTrigger"
-// uObj, err := getUnstructured(pod)
-// convey.So(err, convey.ShouldBeNil)
-// trigger.Template.Source = &v1alpha1.ArtifactLocation{
-// Resource: uObj,
-// }
-// testSensor, err := getSensor()
-// convey.So(err, convey.ShouldBeNil)
-// soc := getsensorExecutionCtx(testSensor)
-// err = soc.executeTrigger(trigger)
-// convey.So(err, convey.ShouldBeNil)
-// })
-//}
-//
-//type FakeName struct {
-// First string `json:"first"`
-// Last string `json:"last"`
-//}
-//
-//type fakeEvent struct {
-// Name string `json:"name"`
-// Namespace string `json:"namespace"`
-// Group string `json:"group"`
-// GenerateName string `json:"generateName"`
-// Kind string `json:"kind"`
-//}
-//
-//func TestTriggerParameterization(t *testing.T) {
-// convey.Convey("Given an event, parameterize the trigger", t, func() {
-// testSensor, err := getSensor()
-// convey.So(err, convey.ShouldBeNil)
-// soc := getsensorExecutionCtx(testSensor)
-// triggerName := "test-workflow-trigger"
-// dependency := "test-gateway:test"
-//
-// fe := &fakeEvent{
-// Namespace: "fake-namespace",
-// Name: "fake",
-// Group: "v1",
-// GenerateName: "fake-",
-// Kind: "Deployment",
-// }
-// eventBytes, err := json.Marshal(fe)
-// convey.So(err, convey.ShouldBeNil)
-//
-// node := v1alpha1.NodeStatus{
-// Event: &apicommon.Event{
-// Payload: eventBytes,
-// Context: apicommon.EventContext{
-// Source: &apicommon.URI{
-// Host: dependency,
-// },
-// ContentType: "application/json",
-// },
-// },
-// Name: dependency,
-// Type: v1alpha1.NodeTypeEventDependency,
-// ID: "1234",
-// Phase: v1alpha1.NodePhaseActive,
-// }
-//
-// trigger := triggerTemplate.DeepCopy()
-// trigger.Template.Name = triggerName
-//
-// trigger.TemplateParameters = []v1alpha1.TriggerParameter{
-// {
-// Src: &v1alpha1.TriggerParameterSource{
-// Event: dependency,
-// Path: "name",
-// },
-// Dest: "name",
-// },
-// }
-//
-// trigger.ResourceParameters = []v1alpha1.TriggerParameter{
-// {
-// Src: &v1alpha1.TriggerParameterSource{
-// Event: dependency,
-// Path: "name",
-// },
-// Dest: "metadata.generateName",
-// },
-// }
-//
-// nodeId := soc.sensor.NodeID(dependency)
-// wfNodeId := soc.sensor.NodeID(triggerName)
-//
-// wfnode := v1alpha1.NodeStatus{
-// Event: &apicommon.Event{
-// Payload: eventBytes,
-// Context: apicommon.EventContext{
-// Source: &apicommon.URI{
-// Host: dependency,
-// },
-// ContentType: "application/json",
-// },
-// },
-// Name: triggerName,
-// Type: v1alpha1.NodeTypeTrigger,
-// ID: "1234",
-// Phase: v1alpha1.NodePhaseNew,
-// }
-//
-// soc.sensor.Status.Nodes = map[string]v1alpha1.NodeStatus{
-// nodeId: node,
-// wfNodeId: wfnode,
-// }
-//
-// err = soc.applyParamsTrigger(trigger)
-// convey.So(err, convey.ShouldBeNil)
-// convey.So(trigger.Template.Name, convey.ShouldEqual, fe.Name)
-//
-// rObj := podTemplate.DeepCopy()
-// rObj.Name = "testTrigger"
-// uObj, err := getUnstructured(rObj)
-// convey.So(err, convey.ShouldBeNil)
-//
-// err = soc.applyParamsResource(trigger.ResourceParameters, uObj)
-// convey.So(err, convey.ShouldBeNil)
-//
-// })
-//}
-//
-//func TestTriggerPolicy(t *testing.T) {
-// convey.Convey("Given a trigger, apply policy", t, func() {
-// testSensor, err := getSensor()
-// convey.So(err, convey.ShouldBeNil)
-// soc := getsensorExecutionCtx(testSensor)
-//
-// trigger1 := triggerTemplate.DeepCopy()
-// trigger2 := triggerTemplate.DeepCopy()
-//
-// trigger1.Template.Name = "testTrigger1"
-// trigger2.Template.Name = "testTrigger2"
-//
-// triggerPod1 := podTemplate.DeepCopy()
-// triggerPod2 := podTemplate.DeepCopy()
-//
-// triggerPod1.Name = "testPod1"
-// triggerPod2.Name = "testPod2"
-//
-// triggerPod1.Labels = successLabels
-// triggerPod2.Labels = failureLabels
-//
-// uObj1, err := getUnstructured(triggerPod1)
-// convey.So(err, convey.ShouldBeNil)
-//
-// uObj2, err := getUnstructured(triggerPod2)
-// convey.So(err, convey.ShouldBeNil)
-//
-// backoff := v1alpha1.Backoff{
-// Duration: 1000000000,
-// Factor: 2,
-// Steps: 10,
-// }
-//
-// trigger1.Template.Source = &v1alpha1.ArtifactLocation{
-// Resource: uObj1,
-// }
-// trigger1.Policy = &v1alpha1.TriggerPolicy{
-// Backoff: backoff,
-// State: &v1alpha1.TriggerStateLabels{
-// Success: successLabels,
-// },
-// }
-//
-// trigger2.Template.Source = &v1alpha1.ArtifactLocation{
-// Resource: uObj2,
-// }
-// trigger2.Policy = &v1alpha1.TriggerPolicy{
-// Backoff: backoff,
-// State: &v1alpha1.TriggerStateLabels{
-// Failure: failureLabels,
-// },
-// }
-//
-// convey.Convey("Execute the first trigger and make sure the trigger execution results in success", func() {
-// err = soc.executeTrigger(*trigger1)
-// convey.So(err, convey.ShouldBeNil)
-// })
-//
-// convey.Convey("Execute the second trigger and make sure the trigger execution results in failure", func() {
-// err = soc.executeTrigger(*trigger2)
-// convey.So(err, convey.ShouldNotBeNil)
-// })
-//
-//		// modify backoff so that applyPolicy doesn't wait too long
-// trigger1.Policy.Backoff = v1alpha1.Backoff{
-// Steps: 2,
-// Duration: 1000000000,
-// Factor: 1,
-// }
-//
-// triggerPod1.Labels = nil
-// uObj1, err = getUnstructured(triggerPod1)
-// convey.So(err, convey.ShouldBeNil)
-// trigger1.Template.Source.Resource = uObj1
-//
-// convey.Convey("If trigger times out and error on timeout is set, trigger execution must fail", func() {
-// trigger1.Policy.ErrorOnBackoffTimeout = true
-// err = soc.executeTrigger(*trigger1)
-// convey.So(err, convey.ShouldNotBeNil)
-// })
-//
-// convey.Convey("If trigger times out and error on timeout is not set, trigger execution must succeed", func() {
-// trigger1.Policy.ErrorOnBackoffTimeout = false
-// err = soc.executeTrigger(*trigger1)
-// convey.So(err, convey.ShouldBeNil)
-// })
-// })
-//}
-//
-//func TestCreateResourceObject(t *testing.T) {
-// convey.Convey("Given a trigger", t, func() {
-// testSensor, err := getSensor()
-// convey.So(err, convey.ShouldBeNil)
-// soc := getsensorExecutionCtx(testSensor)
-// fakeclient := soc.dynamicClient.(*FakeClientPool).Fake
-// dynamicClient := dynamicfake.FakeResourceClient{Resource: schema.GroupVersionResource{Version: "v1", Resource: "pods"}, Fake: &fakeclient}
-//
-// convey.Convey("Given a pod spec, create a pod trigger", func() {
-// pod := podTemplate.DeepCopy()
-// pod.Name = "testTrigger"
-// pod.Namespace = "foo"
-// uObj, err := getUnstructured(pod)
-// convey.So(err, convey.ShouldBeNil)
-//
-// trigger := triggerTemplate.DeepCopy()
-// trigger.Template.Name = "trigger"
-//
-// trigger.Template.Source = &v1alpha1.ArtifactLocation{
-// Resource: uObj,
-// }
-//
-// convey.Println(trigger.Template.Source)
-//
-// err = soc.createResourceObject(trigger, uObj)
-// convey.So(err, convey.ShouldBeNil)
-//
-// unstructuredPod, err := dynamicClient.Get(pod.Name, metav1.GetOptions{})
-// convey.So(err, convey.ShouldBeNil)
-// convey.So(unstructuredPod.GetNamespace(), convey.ShouldEqual, "foo")
-// })
-//
-// convey.Convey("Given a pod without namespace,create a pod trigger", func() {
-// pod := podTemplate.DeepCopy()
-// pod.Name = "testTrigger"
-// uObj, err := getUnstructured(pod)
-// convey.So(err, convey.ShouldBeNil)
-//
-// trigger := triggerTemplate.DeepCopy()
-// trigger.Template.Name = "trigger"
-//
-// trigger.Template.Source = &v1alpha1.ArtifactLocation{
-// Resource: uObj,
-// }
-//
-// err = soc.createResourceObject(trigger, uObj)
-// convey.So(err, convey.ShouldBeNil)
-//
-// unstructuredPod, err := dynamicClient.Get(pod.Name, metav1.GetOptions{})
-// convey.So(err, convey.ShouldBeNil)
-// convey.So(unstructuredPod.GetNamespace(), convey.ShouldEqual, testSensor.Namespace)
-// })
-// })
-//}
-//
-//func TestExtractEvents(t *testing.T) {
-// convey.Convey("Given a sensor, extract events", t, func() {
-// sensor, _ := getSensor()
-// sec := getsensorExecutionCtx(sensor)
-// id := sensor.NodeID("test-gateway:test")
-// sensor.Status.Nodes = map[string]v1alpha1.NodeStatus{
-// id: {
-// Type: v1alpha1.NodeTypeEventDependency,
-// Event: &apicommon.Event{
-// Payload: []byte("hello"),
-// Context: apicommon.EventContext{
-// Source: &apicommon.URI{
-// Host: "test-gateway:test",
-// },
-// },
-// },
-// },
-// }
-// extractedEvents := sec.extractEvents([]v1alpha1.TriggerParameter{
-// {
-// Src: &v1alpha1.TriggerParameterSource{
-// Event: "test-gateway:test",
-// },
-// Dest: "fake-dest",
-// },
-// })
-// convey.So(len(extractedEvents), convey.ShouldEqual, 1)
-// })
-//}
-//
-//func TestCanProcessTriggers(t *testing.T) {
-// convey.Convey("Given a sensor, test if triggers can be processed", t, func() {
-// sensor, err := getSensor()
-// convey.So(err, convey.ShouldBeNil)
-//
-// sensor.Status.Nodes = map[string]v1alpha1.NodeStatus{
-// sensor.NodeID(sensor.Spec.Dependencies[0].Name): {
-// Name: sensor.Spec.Dependencies[0].Name,
-// Phase: v1alpha1.NodePhaseComplete,
-// Type: v1alpha1.NodeTypeEventDependency,
-// },
-// }
-//
-// for _, dep := range []v1alpha1.EventDependency{
-// {
-// Name: "test-gateway:test2",
-// },
-// {
-// Name: "test-gateway:test3",
-// },
-// } {
-// sensor.Spec.Dependencies = append(sensor.Spec.Dependencies, dep)
-// sensor.Status.Nodes[sensor.NodeID(dep.Name)] = v1alpha1.NodeStatus{
-// Name: dep.Name,
-// Phase: v1alpha1.NodePhaseComplete,
-// Type: v1alpha1.NodeTypeEventDependency,
-// }
-// }
-//
-// soc := getsensorExecutionCtx(sensor)
-// ok, err := soc.canProcessTriggers()
-// convey.So(err, convey.ShouldBeNil)
-// convey.So(ok, convey.ShouldEqual, true)
-//
-// node := sensor.Status.Nodes[sensor.NodeID("test-gateway:test2")]
-// node.Phase = v1alpha1.NodePhaseNew
-// sensor.Status.Nodes[sensor.NodeID("test-gateway:test2")] = node
-//
-// ok, err = soc.canProcessTriggers()
-// convey.So(err, convey.ShouldBeNil)
-// convey.So(ok, convey.ShouldEqual, false)
-//
-// convey.Convey("Add dependency groups and evaluate the circuit", func() {
-// for _, depGroup := range []v1alpha1.DependencyGroup{
-// {
-// Name: "depg1",
-// Dependencies: []string{sensor.Spec.Dependencies[1].Name, sensor.Spec.Dependencies[2].Name},
-// },
-// {
-// Name: "depg2",
-// Dependencies: []string{sensor.Spec.Dependencies[0].Name},
-// },
-// } {
-// sensor.Spec.DependencyGroups = append(sensor.Spec.DependencyGroups, depGroup)
-// sensor.Status.Nodes[sensor.NodeID(depGroup.Name)] = v1alpha1.NodeStatus{
-// Name: depGroup.Name,
-// Phase: v1alpha1.NodePhaseNew,
-// }
-// }
-//
-// sensor.Spec.Circuit = "depg1 || depg2"
-//
-// ok, err = soc.canProcessTriggers()
-// convey.So(err, convey.ShouldBeNil)
-// convey.So(ok, convey.ShouldEqual, true)
-// })
-//
-// convey.Convey("If the previous round of triggers failed and error on previous round policy is set, then don't execute the triggers", func() {
-// sensor.Spec.ErrorOnFailedRound = true
-// sensor.Status.TriggerCycleStatus = v1alpha1.TriggerCycleFailure
-//
-// ok, err = soc.canProcessTriggers()
-// convey.So(err, convey.ShouldNotBeNil)
-// convey.So(ok, convey.ShouldEqual, false)
-// })
-// })
-//}
diff --git a/store/configmap_test.go b/store/configmap_test.go
index f5d9cb09c0..a66b4c5bca 100644
--- a/store/configmap_test.go
+++ b/store/configmap_test.go
@@ -53,7 +53,7 @@ spec:
convey.So(err, convey.ShouldBeNil)
convey.So(cmReader, convey.ShouldNotBeNil)
- convey.Convey("Create a workflow from configmap artifact", func() {
+ convey.Convey("Create a workflow from configmap minio", func() {
resourceBody, err := cmReader.Read()
convey.So(err, convey.ShouldBeNil)
diff --git a/store/creds.go b/store/creds.go
index 5aae8aadbc..143dff77ca 100644
--- a/store/creds.go
+++ b/store/creds.go
@@ -28,13 +28,13 @@ import (
"k8s.io/client-go/kubernetes"
)
-// Credentials contains the information necessary to access the artifact
+// Credentials holds the access and secret keys needed to read an artifact
type Credentials struct {
accessKey string
secretKey string
}
-// GetCredentials for this artifact
+// GetCredentials retrieves the access credentials for the given artifact location
func GetCredentials(kubeClient kubernetes.Interface, namespace string, art *v1alpha1.ArtifactLocation) (*Credentials, error) {
if art.S3 != nil {
accessKey, err := GetSecrets(kubeClient, namespace, art.S3.AccessKey.Name, art.S3.AccessKey.Key)
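GetCredentials above only consults the S3 section of the artifact location and resolves the access and secret keys from Kubernetes secrets via GetSecrets. A minimal sketch of that kind of lookup, assuming a hypothetical getSecretValue helper and the pre-context-aware client-go Get signature used elsewhere in this diff (illustrative only, not part of the change):

package store

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getSecretValue is a hypothetical stand-in for the GetSecrets helper: it reads
// a single key out of a named Secret in the given namespace.
func getSecretValue(kubeClient kubernetes.Interface, namespace, name, key string) (string, error) {
	// Fetch the whole Secret object first.
	secret, err := kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// Secret data values are raw bytes; callers get them back as strings.
	return string(secret.Data[key]), nil
}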
diff --git a/store/creds_test.go b/store/creds_test.go
index 160b5f7e48..01acbd60bc 100644
--- a/store/creds_test.go
+++ b/store/creds_test.go
@@ -41,13 +41,13 @@ func TestGetCredentials(t *testing.T) {
_, err := fakeClient.CoreV1().Secrets("testing").Create(mySecretCredentials)
assert.Nil(t, err)
- // creds should be nil for unknown artifact type
+ // creds should be nil for an unknown artifact type
unknownArtifact := &v1alpha1.ArtifactLocation{}
creds, err := GetCredentials(fakeClient, "testing", unknownArtifact)
assert.Nil(t, creds)
assert.Nil(t, err)
- // succeed for S3 artifact type
+ // succeed for the S3 artifact type
s3Artifact := &v1alpha1.ArtifactLocation{
S3: &apicommon.S3Artifact{
AccessKey: &apiv1.SecretKeySelector{
diff --git a/store/git_test.go b/store/git_test.go
index 9f3432dfd5..decb8c4fec 100644
--- a/store/git_test.go
+++ b/store/git_test.go
@@ -84,7 +84,7 @@ func TestGetGitAuth(t *testing.T) {
}
func TestGetBranchOrTag(t *testing.T) {
- convey.Convey("Given a git artifact, get the branch or tag", t, func() {
+ convey.Convey("Given a git minio, get the branch or tag", t, func() {
br := gar.getBranchOrTag()
convey.So(br.Branch, convey.ShouldEqual, "refs/heads/master")
gar.artifact.Branch = "br"
@@ -95,7 +95,7 @@ func TestGetBranchOrTag(t *testing.T) {
convey.So(tag.Branch, convey.ShouldNotEqual, "refs/heads/master")
})
- convey.Convey("Given a git artifact with a specific ref, get the ref", t, func() {
+ convey.Convey("Given a git minio with a specific ref, get the ref", t, func() {
gar.artifact.Ref = "refs/something/weird/or/specific"
br := gar.getBranchOrTag()
convey.So(br.Branch, convey.ShouldEqual, "refs/something/weird/or/specific")
diff --git a/store/resource.go b/store/resource.go
index e884771fe7..ea6d58a8d3 100644
--- a/store/resource.go
+++ b/store/resource.go
@@ -38,6 +38,6 @@ func NewResourceReader(resourceArtifact *unstructured.Unstructured) (ArtifactRea
}
func (reader *ResourceReader) Read() ([]byte, error) {
- log.WithField("resource", reader.resourceArtifact.Object).Debug("reading artifact from resource template")
+ log.WithField("resource", reader.resourceArtifact.Object).Debug("reading minio from resource template")
return yaml.Marshal(reader.resourceArtifact.Object)
}
diff --git a/store/store_test.go b/store/store_test.go
index 530da5a2db..6f1c1bae49 100644
--- a/store/store_test.go
+++ b/store/store_test.go
@@ -18,12 +18,12 @@ package store
import (
"io/ioutil"
- "k8s.io/client-go/kubernetes/fake"
"testing"
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes/fake"
)
type FakeWorkflowArtifactReader struct{}
@@ -34,12 +34,12 @@ func (f *FakeWorkflowArtifactReader) Read() ([]byte, error) {
func TestFetchArtifact(t *testing.T) {
reader := &FakeWorkflowArtifactReader{}
- gvk := &metav1.GroupVersionKind{
- Group: "argoproj.io",
- Version: "v1alpha1",
- Kind: "Workflow",
+ gvr := &metav1.GroupVersionResource{
+ Group: "argoproj.io",
+ Version: "v1alpha1",
+ Resource: "workflows",
}
- obj, err := FetchArtifact(reader, gvk)
+ obj, err := FetchArtifact(reader, gvr)
assert.Nil(t, err)
assert.Equal(t, "argoproj.io/v1alpha1", obj.GetAPIVersion())
assert.Equal(t, "Workflow", obj.GetKind())
@@ -57,37 +57,27 @@ func TestGetArtifactReader(t *testing.T) {
assert.NotNil(t, err)
}
-func TestDecodeAndUnstructure(t *testing.T) {
- t.Run("sensor", decodeSensor)
- t.Run("workflow", decodeWorkflow)
- // Note that since #16 - Restrict ResourceObject creation via RBAC roles
- // decoding&converting to unstructure objects should pass fine for any valid objects
- // the store no longer should control restrictions around object creation
- t.Run("unsupported", decodeUnsupported)
- t.Run("unknown", decodeUnknown)
-}
-
-func decodeSensor(t *testing.T) {
+func TestDecodeSensor(t *testing.T) {
b, err := ioutil.ReadFile("../examples/sensors/multi-trigger-sensor.yaml")
assert.Nil(t, err)
- gvk := &metav1.GroupVersionKind{
- Group: v1alpha1.SchemaGroupVersionKind.Group,
- Version: v1alpha1.SchemaGroupVersionKind.Version,
- Kind: v1alpha1.SchemaGroupVersionKind.Kind,
+ gvr := &metav1.GroupVersionResource{
+ Group: v1alpha1.SchemaGroupVersionKind.Group,
+ Version: v1alpha1.SchemaGroupVersionKind.Version,
+ Resource: v1alpha1.Resource("sensors").Resource,
}
- _, err = decodeAndUnstructure(b, gvk)
+ _, err = decodeAndUnstructure(b, gvr)
assert.Nil(t, err)
}
-func decodeWorkflow(t *testing.T) {
- gvk := &metav1.GroupVersionKind{
- Group: "argoproj.io",
- Version: "v1alpha1",
- Kind: "Workflow",
+func TestDecodeWorkflow(t *testing.T) {
+ gvr := &metav1.GroupVersionResource{
+ Group: "argoproj.io",
+ Version: "v1alpha1",
+ Resource: "workflows",
}
- _, err := decodeAndUnstructure([]byte(workflowv1alpha1), gvk)
+ _, err := decodeAndUnstructure([]byte(workflowv1alpha1), gvr)
assert.Nil(t, err)
}
@@ -106,13 +96,13 @@ spec:
args: ["hello world"]
`
-func decodeDeploymentv1(t *testing.T) {
- gvk := &metav1.GroupVersionKind{
- Group: "apps",
- Version: "v1",
- Kind: "Deployment",
+func TestDecodeDeploymentv1(t *testing.T) {
+ gvr := &metav1.GroupVersionResource{
+ Group: "apps",
+ Version: "v1",
+ Resource: "deployments",
}
- _, err := decodeAndUnstructure([]byte(deploymentv1), gvk)
+ _, err := decodeAndUnstructure([]byte(deploymentv1), gvr)
assert.Nil(t, err)
}
@@ -157,13 +147,13 @@ var deploymentv1 = `
}
`
-func decodeJobv1(t *testing.T) {
- gvk := &metav1.GroupVersionKind{
- Group: "batch",
- Version: "v1",
- Kind: "Job",
+func TestDecodeJobv1(t *testing.T) {
+ gvr := &metav1.GroupVersionResource{
+ Group: "batch",
+ Version: "v1",
+ Resource: "jobs",
}
- _, err := decodeAndUnstructure([]byte(jobv1), gvk)
+ _, err := decodeAndUnstructure([]byte(jobv1), gvr)
assert.Nil(t, err)
}
@@ -187,13 +177,13 @@ spec:
restartPolicy: Never
`
-func decodeUnsupported(t *testing.T) {
- gvk := &metav1.GroupVersionKind{
- Group: "batch",
- Version: "v1",
- Kind: "Job",
+func TestDecodeUnsupported(t *testing.T) {
+ gvr := &metav1.GroupVersionResource{
+ Group: "batch",
+ Version: "v1",
+ Resource: "jobs",
}
- _, err := decodeAndUnstructure([]byte(unsupportedType), gvk)
+ _, err := decodeAndUnstructure([]byte(unsupportedType), gvr)
assert.Nil(t, err)
}
@@ -224,12 +214,12 @@ spec:
done
`
-func decodeUnknown(t *testing.T) {
- gvk := &metav1.GroupVersionKind{
- Group: "unknown",
- Version: "123",
- Kind: "What??",
+func TestDecodeUnknown(t *testing.T) {
+ gvr := &metav1.GroupVersionResource{
+ Group: "unknown",
+ Version: "123",
+ Resource: "What??",
}
- _, err := decodeAndUnstructure([]byte(unsupportedType), gvk)
+ _, err := decodeAndUnstructure([]byte(unsupportedType), gvr)
assert.Nil(t, err, "expected nil error but got", err)
}
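The store tests above move from metav1.GroupVersionKind to metav1.GroupVersionResource: the dynamic client is addressed by resource (the plural, lower-case REST name such as "workflows") rather than by kind. A small standalone sketch of the two values for the Workflow type, with the plural name hard-coded rather than derived from a RESTMapper (illustrative only, not part of the change):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A kind names the API type that appears in manifests ("kind: Workflow")...
	gvk := metav1.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "Workflow"}

	// ...while a resource names the REST endpoint a dynamic client calls.
	gvr := metav1.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "workflows"}

	fmt.Printf("kind:     %s/%s %s\n", gvk.Group, gvk.Version, gvk.Kind)
	fmt.Printf("resource: %s/%s %s\n", gvr.Group, gvr.Version, gvr.Resource)
}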
diff --git a/test/e2e/common/client.go b/test/e2e/common/client.go
deleted file mode 100644
index 7675674a03..0000000000
--- a/test/e2e/common/client.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package common
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-
- gwv1 "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- sv1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
- gwclient "github.com/argoproj/argo-events/pkg/client/gateway/clientset/versioned"
- sensorclient "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned"
- "github.com/pkg/errors"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/kubernetes/scheme"
- restclient "k8s.io/client-go/rest"
- "k8s.io/client-go/tools/clientcmd"
- "k8s.io/client-go/tools/portforward"
- "k8s.io/client-go/transport/spdy"
-)
-
-func init() {
- // Add custom schemes
- if err := sv1.AddToScheme(scheme.Scheme); err != nil {
- panic(err)
- }
- if err := gwv1.AddToScheme(scheme.Scheme); err != nil {
- panic(err)
- }
-}
-
-type E2EClient struct {
- Config *restclient.Config
- KubeClient kubernetes.Interface
- GwClient gwclient.Interface
- SnClient sensorclient.Interface
- E2EID string
- ClientID string
-}
-
-func NewE2EClient() (*E2EClient, error) {
- var kubeconfig string
- if os.Getenv("KUBECONFIG") != "" {
- kubeconfig = os.Getenv("KUBECONFIG")
- } else {
- kubeconfig = filepath.Join(os.Getenv("HOME"), ".kube/config")
- }
- config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
- if err != nil {
- return nil, err
- }
-
- kubeClient, err := kubernetes.NewForConfig(config)
- if err != nil {
- return nil, err
- }
-
- gwClient, err := gwclient.NewForConfig(config)
- if err != nil {
- return nil, err
- }
-
- sensorClient, err := sensorclient.NewForConfig(config)
- if err != nil {
- return nil, err
- }
-
- myrand := rand.New(rand.NewSource(time.Now().UnixNano()))
- clientID := strconv.FormatUint(myrand.Uint64(), 16)
-
- return &E2EClient{
- Config: config,
- KubeClient: kubeClient,
- GwClient: gwClient,
- SnClient: sensorClient,
- ClientID: clientID,
- }, nil
-}
-
-func (clpl *E2EClient) ForwardServicePort(tmpNamespace, podName string, localPort, targetPort int) (chan struct{}, error) {
- // Implementation ref: https://github.com/kubernetes/client-go/issues/51#issuecomment-436200428
- roundTripper, upgrader, err := spdy.RoundTripperFor(clpl.Config)
- if err != nil {
- return nil, err
- }
-
- path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", tmpNamespace, podName)
- hostIP := strings.TrimLeft(clpl.Config.Host, "https://")
- serverURL := url.URL{Scheme: "https", Path: path, Host: hostIP}
-
- dialer := spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, &serverURL)
-
- stopChan, readyChan := make(chan struct{}, 1), make(chan struct{}, 1)
-
- portDesc := fmt.Sprintf("%d:%d", localPort, targetPort)
- out, errOut := new(bytes.Buffer), new(bytes.Buffer)
- forwarder, err := portforward.New(dialer, []string{portDesc}, stopChan, readyChan, out, errOut)
- if err != nil {
- return nil, err
- }
-
- go func() {
- err = forwarder.ForwardPorts()
- if err != nil {
- fmt.Printf("%+v\n", err)
- }
- }()
-
- err = nil
-L:
- for {
- select {
- case <-time.After(10 * time.Second):
- err = errors.New("timed out port forwarding")
- break L
- case <-readyChan:
- break L
- default:
- }
- }
-
- return stopChan, err
-}
diff --git a/test/e2e/core/main_test.go b/test/e2e/core/main_test.go
deleted file mode 100644
index 737300c506..0000000000
--- a/test/e2e/core/main_test.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package core
-
-import (
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "path/filepath"
- "runtime"
- "strings"
- "testing"
- "time"
-
- gwalpha1 "github.com/argoproj/argo-events/pkg/apis/gateway/v1alpha1"
- snv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
- e2ecommon "github.com/argoproj/argo-events/test/e2e/common"
- "github.com/ghodss/yaml"
- "github.com/smartystreets/goconvey/convey"
- corev1 "k8s.io/api/core/v1"
- apierr "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const NAMESPACE = "argo-events"
-
-func TestGeneralUseCase(t *testing.T) {
- client, err := e2ecommon.NewE2EClient()
- if err != nil {
- t.Fatal(err)
- }
-
- _, filename, _, _ := runtime.Caller(0)
- dir, err := filepath.Abs(filepath.Dir(filename))
- if err != nil {
- t.Fatal(err)
- }
- manifestsDir := filepath.Join(dir, "manifests", "general-use-case")
-
- convey.Convey("Test the general use case", t, func() {
-
- convey.Convey("Create event source", func() {
- esBytes, err := ioutil.ReadFile(filepath.Join(manifestsDir, "webhook-gateway-event-source.yaml"))
- if err != nil {
- convey.ShouldPanic(err)
- }
- var cm *corev1.ConfigMap
- if err := yaml.Unmarshal(esBytes, &cm); err != nil {
- convey.ShouldPanic(err)
- }
- if _, err = client.KubeClient.CoreV1().ConfigMaps(NAMESPACE).Create(cm); err != nil {
- convey.ShouldPanic(err)
- }
- })
-
- convey.Convey("Create a gateway.", func() {
- gwBytes, err := ioutil.ReadFile(filepath.Join(manifestsDir, "webhook-gateway.yaml"))
- if err != nil {
- convey.ShouldPanic(err)
- }
- var gw *gwalpha1.Gateway
- if err := yaml.Unmarshal(gwBytes, &gw); err != nil {
- convey.ShouldPanic(err)
- }
- if _, err = client.GwClient.ArgoprojV1alpha1().Gateways(NAMESPACE).Create(gw); err != nil {
- convey.ShouldPanic(err)
- }
- })
-
- convey.Convey("Create a sensor.", func() {
- swBytes, err := ioutil.ReadFile(filepath.Join(manifestsDir, "webhook-sensor.yaml"))
- if err != nil {
- convey.ShouldPanic(err)
- }
- var sn *snv1alpha1.Sensor
- if err := yaml.Unmarshal(swBytes, &sn); err != nil {
- convey.ShouldPanic(err)
- }
- if _, err = client.SnClient.ArgoprojV1alpha1().Sensors(NAMESPACE).Create(sn); err != nil {
- convey.ShouldPanic(err)
- }
- })
-
- convey.Convey("Wait for corresponding resources.", func() {
- ticker := time.NewTicker(time.Second)
- defer ticker.Stop()
- var gwpod, spod *corev1.Pod
- var gwsvc *corev1.Service
- for {
- if gwpod == nil {
- pod, err := client.KubeClient.CoreV1().Pods(NAMESPACE).Get("webhook-gateway", metav1.GetOptions{})
- if err != nil && !apierr.IsNotFound(err) {
- t.Fatal(err)
- }
- _, _ = yaml.Marshal(pod)
- if pod != nil && pod.Status.Phase == corev1.PodRunning {
- gwpod = pod
- }
- }
-
- if gwsvc == nil {
- svc, err := client.KubeClient.CoreV1().Services(NAMESPACE).Get("webhook-gateway-svc", metav1.GetOptions{})
- if err != nil && !apierr.IsNotFound(err) {
- t.Fatal(err)
- }
- gwsvc = svc
- }
- if spod == nil {
- pod, err := client.KubeClient.CoreV1().Pods(NAMESPACE).Get("webhook-sensor", metav1.GetOptions{})
- if err != nil && !apierr.IsNotFound(err) {
- t.Fatal(err)
- }
- if pod != nil && pod.Status.Phase == corev1.PodRunning {
- spod = pod
- }
- }
- if gwpod != nil && gwsvc != nil && spod != nil {
- break
- }
- }
- })
-
- convey.Convey("Make a request to the gateway.", func() {
- // Avoid too early access
- time.Sleep(5 * time.Second)
-
- // Use available port
- l, _ := net.Listen("tcp", ":0")
- port := l.Addr().(*net.TCPAddr).Port
- l.Close()
-
- // Use port forwarding to access pods in minikube
- stopChan, err := client.ForwardServicePort(NAMESPACE, "webhook-gateway", port, 12000)
- if err != nil {
- t.Fatal(err)
- }
- defer close(stopChan)
-
- url := fmt.Sprintf("http://localhost:%d/foo", port)
- req, err := http.NewRequest("POST", url, strings.NewReader("e2e"))
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err := new(http.Client).Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
-
- if t.Failed() {
- t.FailNow()
- }
- })
-
- convey.Convey("Check if the sensor trigggered a pod.", func() {
- pod, err := client.KubeClient.CoreV1().Pods(NAMESPACE).Get("webhook-sensor-triggered-pod", metav1.GetOptions{})
- if err != nil && !apierr.IsNotFound(err) {
- t.Error(err)
- }
- if pod != nil && pod.Status.Phase == corev1.PodSucceeded {
- convey.So(pod.Spec.Containers[0].Args[0], convey.ShouldEqual, "e2e")
- }
- })
- })
-}
diff --git a/test/e2e/core/manifests/general-use-case/webhook-gateway-event-source.yaml b/test/e2e/core/manifests/general-use-case/webhook-gateway-event-source.yaml
deleted file mode 100644
index d274e271ae..0000000000
--- a/test/e2e/core/manifests/general-use-case/webhook-gateway-event-source.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: webhook-event-source
- labels:
- # do not remove
- argo-events-event-source-version: v0.11
-data:
- foo: |-
- # port to run HTTP server on
- port: "12000"
- # endpoint to listen to
- endpoint: "/index"
- # HTTP request method to allow. In this case, only POST requests are accepted
- method: "POST"
diff --git a/test/e2e/core/manifests/general-use-case/webhook-gateway.yaml b/test/e2e/core/manifests/general-use-case/webhook-gateway.yaml
deleted file mode 100644
index 0aac8fe809..0000000000
--- a/test/e2e/core/manifests/general-use-case/webhook-gateway.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: Gateway
-metadata:
- name: webhook-gateway
- labels:
- # gateway controller with instanceId "argo-events" will process this gateway
- gateways.argoproj.io/gateway-controller-instanceid: argo-events
- # gateway controller will use this label to match with its own version
- # do not remove
- argo-events-gateway-version: v0.11
-spec:
- type: "webhook"
- eventSource: "webhook-event-source"
- processorPort: "9330"
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- template:
- metadata:
- name: "webhook-gateway"
- labels:
- gateway-name: "webhook-gateway"
- spec:
- containers:
- - name: "gateway-client"
- image: "argoproj/gateway-client:v0.11"
- imagePullPolicy: "Always"
- command: ["/bin/gateway-client"]
- - name: "webhook-events"
- image: "argoproj/webhook-gateway:v0.11"
- imagePullPolicy: "Always"
- command: ["/bin/webhook-gateway"]
- # To make webhook secure, mount the secret that contains certificate and private key in the container
- # and refer that mountPath in the event source.
- # volumeMounts:
- # - mountPath: "/bin/webhook-secure"
- # name: secure
- # volumes:
- # - name: secure
- # secret:
- # secretName: webhook-secure
- serviceAccountName: "argo-events-sa"
- service:
- metadata:
- name: webhook-gateway-svc
- spec:
- selector:
- gateway-name: "webhook-gateway"
- ports:
- - port: 12000
- targetPort: 12000
- type: ClusterIP
- watchers:
- sensors:
- - name: "webhook-sensor"
diff --git a/test/e2e/core/manifests/general-use-case/webhook-sensor.yaml b/test/e2e/core/manifests/general-use-case/webhook-sensor.yaml
deleted file mode 100644
index 2c494b56bb..0000000000
--- a/test/e2e/core/manifests/general-use-case/webhook-sensor.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: Sensor
-metadata:
- name: webhook-sensor
- labels:
- sensors.argoproj.io/sensor-controller-instanceid: argo-events
- # sensor controller will use this label to match with its own version
- # do not remove
- argo-events-sensor-version: v0.11
-spec:
- template:
- spec:
- containers:
- - name: "sensor"
- image: "argoproj/sensor:v0.11"
- imagePullPolicy: "IfNotPresent"
- serviceAccountName: argo-events-sa
- dependencies:
- - name: "webhook-gateway:foo"
- eventProtocol:
- type: "HTTP"
- http:
- port: "9300"
- triggers:
- - template:
- name: webhook-pod-trigger
- version: v1
- kind: Pod
- source:
- inline: |
- apiVersion: v1
- kind: Pod
- metadata:
- name: webhook-sensor-triggered-pod
- spec:
- containers:
- - name: whalesay
- image: docker/whalesay:latest
- command: [cowsay]
- args: ["TO_BE_PASSED"]
- restartPolicy: "Never"
- resourceParameters:
- - src:
- event: "webhook-gateway:foo"
- dest: spec.containers.0.args.0
diff --git a/version.go b/version.go
index 6eb89f17d4..5047bafc7c 100644
--- a/version.go
+++ b/version.go
@@ -24,7 +24,7 @@ import (
// Version information set by link flags during build. We fall back to these sane
// default values when we build outside the Makefile context (e.g. go build or go test).
var (
- version = "v0.11" // value from VERSION file
+ version = "v0.12-rc" // value from VERSION file
buildDate = "1970-01-01T00:00:00Z" // output from `date -u +'%Y-%m-%dT%H:%M:%SZ'`
gitCommit = "" // output from `git rev-parse HEAD`
gitTag = "" // output from `git describe --exact-match --tags HEAD` (if clean tree state)