From e69d1d6c9f577bfe7838a89acf5ee22d1c390644 Mon Sep 17 00:00:00 2001 From: Steven Davidovitz Date: Mon, 16 Sep 2024 13:39:57 -0700 Subject: [PATCH] initial commit of kubewire --- .github/CODEOWNERS | 1 + .github/dependabot.yml | 10 + .github/workflows/main.yml | 150 ++++ .gitignore | 3 + .golangci.yaml | 40 + .goreleaser.yaml | 30 + Dockerfile | 27 + LICENSE | 17 + Makefile | 54 ++ README.md | 196 +++++ cmd/agent.go | 61 ++ cmd/docgen.go | 28 + cmd/proxy.go | 70 ++ cmd/root.go | 63 ++ docs/kw.md | 16 + docs/kw_completion.md | 30 + docs/kw_completion_bash.md | 49 ++ docs/kw_completion_fish.md | 40 + docs/kw_completion_powershell.md | 37 + docs/kw_completion_zsh.md | 51 ++ docs/kw_proxy.md | 35 + examples/minikube/README.md | 15 + examples/minikube/hello-world.yml | 38 + examples/minikube/postgresql.yml | 52 ++ go.mod | 117 +++ go.sum | 340 ++++++++ main.go | 11 + pkg/agent/agent.go | 151 ++++ pkg/agent/agent_test.go | 138 +++ pkg/agent/kubernetes.go | 473 ++++++++++ pkg/agent/kubernetes_test.go | 1163 +++++++++++++++++++++++++ pkg/config/config.go | 142 +++ pkg/config/key.go | 25 + pkg/config/version.go | 3 + pkg/kuberneteshelpers/client.go | 26 + pkg/kuberneteshelpers/cluster.go | 109 +++ pkg/kuberneteshelpers/cluster_test.go | 238 +++++ pkg/kuberneteshelpers/object.go | 45 + pkg/kuberneteshelpers/podexec.go | 66 ++ pkg/nat/discovery.go | 68 ++ pkg/proxy/config.go | 110 +++ pkg/proxy/config_test.go | 150 ++++ pkg/proxy/proxy.go | 130 +++ pkg/routing/routing.go | 17 + pkg/routing/routing_darwin.go | 69 ++ pkg/routing/routing_linux.go | 118 +++ pkg/runnable/runnable.go | 9 + pkg/wg/device.go | 47 + pkg/wg/device_darwin.go | 127 +++ pkg/wg/device_linux.go | 125 +++ 50 files changed, 5130 insertions(+) create mode 100644 .github/CODEOWNERS create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/main.yml create mode 100644 .gitignore create mode 100644 .golangci.yaml create mode 100644 .goreleaser.yaml create mode 100644 Dockerfile create mode 
100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 cmd/agent.go create mode 100644 cmd/docgen.go create mode 100644 cmd/proxy.go create mode 100644 cmd/root.go create mode 100644 docs/kw.md create mode 100644 docs/kw_completion.md create mode 100644 docs/kw_completion_bash.md create mode 100644 docs/kw_completion_fish.md create mode 100644 docs/kw_completion_powershell.md create mode 100644 docs/kw_completion_zsh.md create mode 100644 docs/kw_proxy.md create mode 100644 examples/minikube/README.md create mode 100644 examples/minikube/hello-world.yml create mode 100644 examples/minikube/postgresql.yml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 main.go create mode 100644 pkg/agent/agent.go create mode 100644 pkg/agent/agent_test.go create mode 100644 pkg/agent/kubernetes.go create mode 100644 pkg/agent/kubernetes_test.go create mode 100644 pkg/config/config.go create mode 100644 pkg/config/key.go create mode 100644 pkg/config/version.go create mode 100644 pkg/kuberneteshelpers/client.go create mode 100644 pkg/kuberneteshelpers/cluster.go create mode 100644 pkg/kuberneteshelpers/cluster_test.go create mode 100644 pkg/kuberneteshelpers/object.go create mode 100644 pkg/kuberneteshelpers/podexec.go create mode 100644 pkg/nat/discovery.go create mode 100644 pkg/proxy/config.go create mode 100644 pkg/proxy/config_test.go create mode 100644 pkg/proxy/proxy.go create mode 100644 pkg/routing/routing.go create mode 100644 pkg/routing/routing_darwin.go create mode 100644 pkg/routing/routing_linux.go create mode 100644 pkg/runnable/runnable.go create mode 100644 pkg/wg/device.go create mode 100644 pkg/wg/device_darwin.go create mode 100644 pkg/wg/device_linux.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..9553f15 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @steved diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 
0000000..d4acac6 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,10 @@ +version: 2 +updates: + - package-ecosystem: gomod + directory: / + schedule: + interval: daily + groups: + dependencies: + patterns: + - "*" diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000..8169cfb --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,150 @@ +name: Test and Publish Image + +on: + push: + branches: + - 'main' + tags: + - '*' + pull_request: + +env: + GO_VERSION: "1.23" + BUILD_PLATFORMS: linux/amd64,linux/arm64 + +jobs: + test: + strategy: + matrix: + os: [ubuntu-latest, macos-latest] + + runs-on: ${{ matrix.os }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Build + run: make build + + - name: golangci-lint + uses: golangci/golangci-lint-action@v6 + with: + version: v1.60 + + - name: Unit test + run: make test + + - name: Check for git changes + run: git diff --exit-code || { echo "Make sure to commit doc changes"; exit 1; } + + publish: + runs-on: ubuntu-latest + + needs: test + + permissions: + contents: read + packages: write + id-token: write + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + # Don't use merge ref to ensure sha- image tag is accurate + ref: ${{ github.event.pull_request.head.sha }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to ghcr.io + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Login to quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USER }} + password: ${{ secrets.QUAY_PASSWORD }} + + - id: meta + name: Extract Docker metadata + uses: docker/metadata-action@v5 + env: + DOCKER_METADATA_PR_HEAD_SHA: "true" + with: + images: | + ghcr.io/cerebrotech/k8s-fwd + 
quay.io/domino/wg + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=sha + + - name: Go build cache for Docker + uses: actions/cache@v4 + with: + path: go-build-cache + key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum', 'Dockerfile') }} + + - name: inject go-build-cache into docker + uses: reproducible-containers/buildkit-cache-dance@v3 + with: + cache-map: | + { + "go-build-cache": "/root/.cache/go-build" + } + + - name: Build and push + uses: docker/build-push-action@v6 + with: + context: . + push: true + provenance: false + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: ${{ env.BUILD_PLATFORMS }} + cache-from: type=gha, scope=${{ github.job }} + cache-to: type=gha, scope=${{ github.job }} + + release: + runs-on: ubuntu-latest + + needs: publish + + permissions: + contents: write + + if: ${{ startsWith(github.ref, 'refs/tags/') }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v6 + with: + distribution: goreleaser + version: '~> v2' + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..605effb --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +.idea/ +.bin/ +dist/ diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 0000000..a95fe42 --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,40 @@ +run: + timeout: 10m + +linters-settings: + misspell: + locale: US + +linters: + disable-all: true + enable: + # defaults + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - unused + # extra + - bodyclose + - containedctx + - contextcheck + - copyloopvar + - dogsled + - durationcheck + - gofmt + - goimports + - loggercheck 
+ - misspell + - nilerr + - nilnil + - nosprintfhostport + - prealloc + - predeclared + - reassign + - revive + - unconvert + - unparam + - wastedassign + - whitespace + - wsl diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 0000000..afef613 --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,30 @@ +version: 2 + +before: + hooks: + - make tidy + +builds: + - id: k8s-fwd + binary: k8s-fwd + ldflags: + - -s -w + - -X github.com/cerebrotech/k8s-fwd/pkg/config.Version={{ .Version }} + env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + goarch: + - amd64 + - arm64 + +archives: + - format: tar.gz + name_template: '{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}' + +changelog: + sort: asc + +release: + mode: replace diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..289a289 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,27 @@ +FROM --platform=$BUILDPLATFORM cgr.dev/chainguard/go:latest-dev AS builder + +WORKDIR /src + +ARG ldflags +ARG TARGETOS TARGETARCH + +RUN --mount=target=. \ + --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH \ + go build -trimpath -ldflags "${ldflags} -extldflags '-static'" -o /out/kubewire . + +FROM cgr.dev/chainguard/wolfi-base +WORKDIR / + +RUN apk add --no-cache \ + curl \ + iproute2 \ + iptables \ + iputils \ + net-tools \ + wireguard-tools + +COPY --from=builder /out/kubewire . 
+ +ENTRYPOINT ["/kubewire", "agent"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..f85e365 --- /dev/null +++ b/LICENSE @@ -0,0 +1,17 @@ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..4be0fcd --- /dev/null +++ b/Makefile @@ -0,0 +1,54 @@ +GOENV := CGO_ENABLED=0 +GO := $(GOENV) go +SHELL := /bin/bash + +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +GOOS=$(shell go env GOOS) +GOARCH=$(shell go env GOARCH) + +BIN ?= kubewire + +BIN_DIR ?= $(shell pwd)/.bin +$(BIN_DIR): + @mkdir -p $(BIN_DIR) + +.PHONY: build +build: docgen + $(GO) build -o $(BIN_DIR)/$(BIN) . + +docker: + docker build . \ + -t ghcr.io/steved/kubewire:latest \ + --platform linux/amd64 --push + +.PHONY: test +test: + $(GO) test -v -timeout=5m ./... 
+ +.PHONY: docgen +docgen: + rm -r ./docs/* + $(GO) run main.go docgen + +ifeq (,$(shell command -v golangci-lint)) +GOLANGCI_LINT=$(GO) run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.3 +else +GOLANGCI_LINT=golangci-lint +endif + +.PHONY: lint +lint: + $(GOLANGCI_LINT) run + +.PHONY: tidy +tidy: + @rm -f go.sum; go mod tidy + +.DEFAULT_GOAL:=help +help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/README.md b/README.md new file mode 100644 index 0000000..872ed7c --- /dev/null +++ b/README.md @@ -0,0 +1,196 @@ +# KubeWire + +[![](https://github.com/steved/kubewire/actions/workflows/main.yml/badge.svg)](https://github.com/steved/kubewire/actions) + +KubeWire allows easy, direct connections to, and through, a Kubernetes cluster. + +## How? + +KubeWire uses [WireGuard](https://www.wireguard.com/) to securely and easily connect the two networks. + +### Installation + +KubeWire currently supports Linux and MacOS. + +* Get the [latest release](https://github.com/steved/kubewire/releases/latest) for your operating system +* Unpack and install the binary: + ``` + tar -C /usr/local/bin -xf kubewire.tar.gz kubewire + ``` + +### Usage + +KubeWire requires access to Kubernetes (by default; `~/.kube/config`) and a deployment or statefulset to proxy traffic for and through. + +**Note*: `proxy` will modify the target resource in the cluster. Only execute the command in clusters where restoration is simple (e.g. with `helm`) or changes are not destructive. 
+ +``` +$ kw proxy deploy/hello-world + +2024-09-16T12:33:33.403-0700 INFO Waiting for load balancer to be ready {"service": "wg-hello-world", "namespace": "default"} +2024-09-16T12:33:36.316-0700 INFO Load balancer ready, waiting for DNS to resolve {"hostname": "example.elb.us-west-2.amazonaws.com"} +2024-09-16T12:36:16.521-0700 INFO Kubernetes setup complete +2024-09-16T12:36:16.553-0700 INFO Wireguard device setup complete +2024-09-16T12:36:16.592-0700 INFO Routing setup complete +2024-09-16T12:36:16.592-0700 INFO Started. Use Ctrl-C to exit... +``` + +Optional arguments allow targeting of namespaces (`-n`), containers (`-c`). + +When `proxy` exits, created Kubernetes resources such as services or network policies will not be deleted to allow for easier resumption of an existing session. +Resources will be removed at exit with `--keep-resources=false` is passed. + +Once connected, access Kubernetes cluster resources directly. Including the K8s API: +``` +$ curl -k https://kubernetes.default +{ + "kind": "Status", + "apiVersion": "v1", + "metadata": {}, + "status": "Failure", + "message": "forbidden: User \"system:anonymous\" cannot get path \"/\"", + "reason": "Forbidden", + "details": {}, + "code": 403 +} +``` + +See [kw_proxy.md](./docs/kw_proxy.md) for detailed usage information. + +#### Direct access + +By default, KubeWire will access the pod by using a `LoadBalancer` service. KubeWire has been tested in AWS, GCP, and Azure. + +In environments where direct access is allowed from local host to remote pod or vice versa, direct modes can be used. + +If the remote pod has direct access to the local host, the accessible address of the local host can be passed to `proxy`. +For example, for minikube: +``` +$ ip=$(minikube ssh -- getent hosts host.minikube.internal | awk '{print $1}') +$ kw proxy --local-address "$ip:19070" deploy/hello-world +``` + +See [examples/minikube](./examples/minikube/README.md) for more information. 
+ +If the remote pod is accessible to the internet through a NAT that supports Endpoint-Independent mapping, both the local and remote instances can attempt to discover their remote address, coordinate ports, and connect directly. +For example, in AWS with a [NAT instance](https://fck-nat.dev) instead of a NAT gateway: +``` +$ kw proxy --direct deploy/hello-world +``` + +### Limitations + +* Windows is not supported +* IPv6 is not supported +* Istio support has not been tested with ambient mesh + +### Troubleshooting + +#### WireGuard connectivity + +`wg` can be used to check WireGuard connectivity locally and in the remote pod: +``` +$ wg + +interface: utun4 + public key: (hidden) + private key: (hidden) + listening port: 19070 + +peer: (hidden) + endpoint: (address):19070 + allowed ips: 100.64.0.0/16, 172.20.0.0/16, 10.0.0.0/16, 10.1.0.0/28 + latest handshake: 11 seconds ago + transfer: 15.44 KiB received, 46.79 KiB sent + persistent keepalive: every 25 seconds + +$ kubectl exec -it $(kubectl get po -l app.kubernetes.io/name=hello-world -oname) -- wg + +interface: wg0 + public key: (hidden) + private key: (hidden) + listening port: 19070 + +peer: (hidden) + endpoint: (address):19070 + allowed ips: 100.64.0.0/16, 172.20.0.0/16, 10.0.0.0/16, 10.1.0.0/28 + latest handshake: 21 seconds ago + transfer: 14.08 KiB received, 22.81 KiB sent + persistent keepalive: every 25 seconds +``` + +If "latest handshake" isn't displayed or was a number of minutes ago, the connection may not be established. + +### Building from source + +KubeWire support Go v1.23. In order to build KubeWire from source: + +* Clone this repository +* Build and run the executable: + ``` + make build + .bin/kubewire + ``` + +## Why? + +### Performance + +In limited testing in comparison to `mirrord`, `kw` is multiple times less latent. 
+ +Setup: +``` +PGPASSWORD=mysupersecretpassword psql -h postgresql.default -p 5432 -U postgres -c 'create database testdb' +PGPASSWORD=mysupersecretpassword pgbench -i -s 25 -h postgresql.default -U postgres testdb +PGPASSWORD=mysupersecretpassword pgbench -h postgresql.default -U postgres -c 5 -j 10 -R 25 -T 30 testdb +``` + +KubeWire: +``` +pgbench (14.13 (Homebrew), server 16.4) +starting vacuum...end. +transaction type: +scaling factor: 1 +query mode: simple +number of clients: 5 +number of threads: 5 +duration: 30 s +number of transactions actually processed: 746 +latency average = 13.604 ms +latency stddev = 3.557 ms +rate limit schedule lag: avg 3.242 (max 18.100) ms +initial connection time = 23.029 ms +tps = 24.885951 (without initial connection time) +``` + +mirrord: +``` +$ mirrord exec -s '' -n default -t pod/hello-world-86cfdd5fc6-zqr75 --steal --fs-mode local -- /bin/sh -c "PGPASSWORD=mysupersecretpassword pgbench -h postgresql.default -U postgres -c 5 -j 10 -R 25 -T 30 testdb" +starting vacuum...end. +transaction type: +scaling factor: 1 +query mode: simple +number of clients: 5 +number of threads: 5 +duration: 30 s +number of transactions actually processed: 745 +latency average = 34.005 ms +latency stddev = 25.867 ms +rate limit schedule lag: avg 6.589 (max 114.021) ms +initial connection time = 152.967 ms +tps = 24.910868 (without initial connection time) +``` + +### Similar projects + +* [k8s-insider](https://github.com/TrueGoric/k8s-insider) - no MacOS support +* [kubetunnel](https://github.com/we-dcode/kubetunnel) - operates at the service level +* [mirrord](https://github.com/metalbear-co/mirrord/) - performance + +## Contributing + +Issues and pull requests are always welcome! + +## License + +Use of this software is subject to important terms and conditions as set forth in the [LICENSE](./LICENSE) file. 
diff --git a/cmd/agent.go b/cmd/agent.go new file mode 100644 index 0000000..011055d --- /dev/null +++ b/cmd/agent.go @@ -0,0 +1,61 @@ +//go:build linux + +package cmd + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/go-logr/logr" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/steved/kubewire/pkg/agent" + "github.com/steved/kubewire/pkg/config" +) + +func init() { + var configFile string + + remoteCmd := &cobra.Command{ + Use: "agent", + Short: "Runs wireguard agent", + Hidden: true, // users shouldn't run this themselves + RunE: func(_ *cobra.Command, _ []string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + configContents, err := os.ReadFile(configFile) + if err != nil { + return fmt.Errorf("unable to open config file %q: %w", configFile, err) + } + + cfg := config.Wireguard{} + + if err := yaml.Unmarshal(configContents, &cfg); err != nil { + return fmt.Errorf("unable to read config file %q: %w", configFile, err) + } + + var proxyExcludedPorts []string + localPortsExcludeProxy := os.Getenv("LOCAL_PORTS_EXCLUDE_PROXY") + if localPortsExcludeProxy != "" { + proxyExcludedPorts = strings.Split(localPortsExcludeProxy, ",") + } + + istioInterceptMode := os.Getenv("ISTIO_INTERCEPTION_MODE") + istioEnabled := istioInterceptMode != "" + if istioEnabled { + // istio health and prometheus ports + proxyExcludedPorts = append(proxyExcludedPorts, "15020", "15021") + } + + return agent.Run(logr.NewContext(ctx, log), cfg, istioEnabled, proxyExcludedPorts) + }, + } + + remoteCmd.Flags().StringVarP(&configFile, "config", "c", "/app/config/wg.yml", "path to configuration file") + + rootCmd.AddCommand(remoteCmd) +} diff --git a/cmd/docgen.go b/cmd/docgen.go new file mode 100644 index 0000000..cfa1e93 --- /dev/null +++ b/cmd/docgen.go @@ -0,0 +1,28 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func init() { + var docsPath string + + var docgenCmd = &cobra.Command{ 
+ Use: "docgen", + Short: "Generation documentation for the command line", + Hidden: true, + RunE: func(_ *cobra.Command, _ []string) error { + err := doc.GenMarkdownTree(rootCmd, docsPath) + if err != nil { + return err + } + + return nil + }, + } + + docgenCmd.Flags().StringVar(&docsPath, "out", "./docs/", "directory to write generated CLI documentation to") + + rootCmd.AddCommand(docgenCmd) +} diff --git a/cmd/proxy.go b/cmd/proxy.go new file mode 100644 index 0000000..a6a2401 --- /dev/null +++ b/cmd/proxy.go @@ -0,0 +1,70 @@ +package cmd + +import ( + "context" + goflag "flag" + "fmt" + "net/netip" + "os" + + "github.com/go-logr/logr" + "github.com/spf13/cobra" + + "github.com/steved/kubewire/pkg/config" + "github.com/steved/kubewire/pkg/kuberneteshelpers" + "github.com/steved/kubewire/pkg/proxy" +) + +func init() { + var ( + kubeconfig, overlayPrefix string + directAccess bool + ) + + cfg := config.NewConfig() + + proxyCmd := &cobra.Command{ + Use: "proxy [target]", + Short: "Proxy cluster access to the target Kubernetes object.", + Args: cobra.MinimumNArgs(1), + RunE: func(_ *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client, restConfig, err := kuberneteshelpers.ClientConfig(kubeconfig) + if err != nil { + return fmt.Errorf("failed to create kubernetes client: %w", err) + } + + obj, err := kuberneteshelpers.ResolveObject(kubeconfig, cfg.Namespace, args) + if err != nil { + return fmt.Errorf("failed to resolve target kubernetes object: %w", err) + } + + cfg.TargetObject = obj + + if err := proxy.ResolveWireguardConfig(ctx, cfg, client, overlayPrefix, directAccess); err != nil { + return fmt.Errorf("unable to create wireguard config: %w", err) + } + + return proxy.Run(logr.NewContext(ctx, log), cfg, client, restConfig) + }, + } + + proxyCmd.Flags().StringVarP(&kubeconfig, "kubeconfig", "", os.Getenv("KUBECONFIG"), "Kubernetes cfg file") + proxyCmd.Flags().StringVarP(&cfg.Namespace, 
"namespace", "n", "default", "Namespace of the target object") + proxyCmd.Flags().StringVarP(&cfg.Container, "container", "c", "", "Name of the container to replace") + proxyCmd.Flags().StringVarP(&overlayPrefix, "overlay", "o", "", "Specify the overlay CIDR for Wireguard. Useful if auto-detection fails") + proxyCmd.Flags().BoolVarP(&directAccess, "direct", "p", false, "Whether to try NAT hole punching (true) or use a load balancer for access to the pod") + proxyCmd.Flags().StringVarP(&cfg.AgentImage, "agent-image", "i", fmt.Sprintf("ghcr.io/steved/kubewire:%s", config.Version), "Agent image to use") + proxyCmd.Flags().BoolVarP(&cfg.KeepResources, "keep-resources", "k", true, "Keep created resources running when exiting") + + // Workaround for lack of "TextVar" support in pflag / cobra + goflag.TextVar(&cfg.KubernetesClusterDetails.ServiceCIDR, "service-cidr", netip.Prefix{}, "Kubernetes Service CIDR") + goflag.TextVar(&cfg.KubernetesClusterDetails.NodeCIDR, "node-cidr", netip.Prefix{}, "Kubernetes node CIDR") + goflag.TextVar(&cfg.KubernetesClusterDetails.PodCIDR, "pod-cidr", netip.Prefix{}, "Kubernetes pod CIDR") + goflag.TextVar(&cfg.Wireguard.LocalAddress, "local-address", netip.AddrPort{}, "Local address accessible from remote agent") + proxyCmd.Flags().AddGoFlagSet(goflag.CommandLine) + + rootCmd.AddCommand(proxyCmd) +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 0000000..a34be02 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,63 @@ +package cmd + +import ( + "os" + "strconv" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "github.com/spf13/cobra" + "go.uber.org/zap" +) + +var log logr.Logger +var debug bool + +var rootCmd = &cobra.Command{ + Use: "kw", + Short: "KubeWire allows easy, direct connections to, and through, a Kubernetes cluster.", + SilenceUsage: true, + DisableAutoGenTag: true, + PersistentPreRun: func(_ *cobra.Command, _ []string) { + config := zap.Config{ + Level: zap.NewAtomicLevelAt(zap.InfoLevel), + Development: 
false, + DisableCaller: true, + DisableStacktrace: false, + Encoding: "console", + EncoderConfig: zap.NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } + + if debug { + config.Development = true + config.DisableCaller = false + config.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + } + + log = zapr.NewLogger(zap.Must(config.Build())) + }, +} + +func init() { + var ( + debugDefault bool + err error + ) + + if envDebug := os.Getenv("DEBUG"); envDebug != "" { + debugDefault, err = strconv.ParseBool(envDebug) + if err != nil { + log.Error(err, "unable to parse DEBUG env variable") + } + } + + rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "d", debugDefault, "Toggle debug logging") +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + os.Exit(1) + } +} diff --git a/docs/kw.md b/docs/kw.md new file mode 100644 index 0000000..7807a29 --- /dev/null +++ b/docs/kw.md @@ -0,0 +1,16 @@ +## kw + +KubeWire allows easy, direct connections to, and through, a Kubernetes cluster. + +### Options + +``` + -d, --debug Toggle debug logging + -h, --help help for kw +``` + +### SEE ALSO + +* [kw completion](kw_completion.md) - Generate the autocompletion script for the specified shell +* [kw proxy](kw_proxy.md) - Proxy cluster access to the target Kubernetes object. + diff --git a/docs/kw_completion.md b/docs/kw_completion.md new file mode 100644 index 0000000..c6fcca5 --- /dev/null +++ b/docs/kw_completion.md @@ -0,0 +1,30 @@ +## kw completion + +Generate the autocompletion script for the specified shell + +### Synopsis + +Generate the autocompletion script for kw for the specified shell. +See each sub-command's help for details on how to use the generated script. 
+ + +### Options + +``` + -h, --help help for completion +``` + +### Options inherited from parent commands + +``` + -d, --debug Toggle debug logging +``` + +### SEE ALSO + +* [kw](kw.md) - KubeWire allows easy, direct connections to, and through, a Kubernetes cluster. +* [kw completion bash](kw_completion_bash.md) - Generate the autocompletion script for bash +* [kw completion fish](kw_completion_fish.md) - Generate the autocompletion script for fish +* [kw completion powershell](kw_completion_powershell.md) - Generate the autocompletion script for powershell +* [kw completion zsh](kw_completion_zsh.md) - Generate the autocompletion script for zsh + diff --git a/docs/kw_completion_bash.md b/docs/kw_completion_bash.md new file mode 100644 index 0000000..de1f3d6 --- /dev/null +++ b/docs/kw_completion_bash.md @@ -0,0 +1,49 @@ +## kw completion bash + +Generate the autocompletion script for bash + +### Synopsis + +Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. + +To load completions in your current shell session: + + source <(kw completion bash) + +To load completions for every new session, execute once: + +#### Linux: + + kw completion bash > /etc/bash_completion.d/kw + +#### macOS: + + kw completion bash > $(brew --prefix)/etc/bash_completion.d/kw + +You will need to start a new shell for this setup to take effect. 
+ + +``` +kw completion bash +``` + +### Options + +``` + -h, --help help for bash + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -d, --debug Toggle debug logging +``` + +### SEE ALSO + +* [kw completion](kw_completion.md) - Generate the autocompletion script for the specified shell + diff --git a/docs/kw_completion_fish.md b/docs/kw_completion_fish.md new file mode 100644 index 0000000..4a2ddc4 --- /dev/null +++ b/docs/kw_completion_fish.md @@ -0,0 +1,40 @@ +## kw completion fish + +Generate the autocompletion script for fish + +### Synopsis + +Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: + + kw completion fish | source + +To load completions for every new session, execute once: + + kw completion fish > ~/.config/fish/completions/kw.fish + +You will need to start a new shell for this setup to take effect. + + +``` +kw completion fish [flags] +``` + +### Options + +``` + -h, --help help for fish + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -d, --debug Toggle debug logging +``` + +### SEE ALSO + +* [kw completion](kw_completion.md) - Generate the autocompletion script for the specified shell + diff --git a/docs/kw_completion_powershell.md b/docs/kw_completion_powershell.md new file mode 100644 index 0000000..3c056f7 --- /dev/null +++ b/docs/kw_completion_powershell.md @@ -0,0 +1,37 @@ +## kw completion powershell + +Generate the autocompletion script for powershell + +### Synopsis + +Generate the autocompletion script for powershell. + +To load completions in your current shell session: + + kw completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. 
+ + +``` +kw completion powershell [flags] +``` + +### Options + +``` + -h, --help help for powershell + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -d, --debug Toggle debug logging +``` + +### SEE ALSO + +* [kw completion](kw_completion.md) - Generate the autocompletion script for the specified shell + diff --git a/docs/kw_completion_zsh.md b/docs/kw_completion_zsh.md new file mode 100644 index 0000000..53b1151 --- /dev/null +++ b/docs/kw_completion_zsh.md @@ -0,0 +1,51 @@ +## kw completion zsh + +Generate the autocompletion script for zsh + +### Synopsis + +Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once: + + echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions in your current shell session: + + source <(kw completion zsh) + +To load completions for every new session, execute once: + +#### Linux: + + kw completion zsh > "${fpath[1]}/_kw" + +#### macOS: + + kw completion zsh > $(brew --prefix)/share/zsh/site-functions/_kw + +You will need to start a new shell for this setup to take effect. + + +``` +kw completion zsh [flags] +``` + +### Options + +``` + -h, --help help for zsh + --no-descriptions disable completion descriptions +``` + +### Options inherited from parent commands + +``` + -d, --debug Toggle debug logging +``` + +### SEE ALSO + +* [kw completion](kw_completion.md) - Generate the autocompletion script for the specified shell + diff --git a/docs/kw_proxy.md b/docs/kw_proxy.md new file mode 100644 index 0000000..4b35f07 --- /dev/null +++ b/docs/kw_proxy.md @@ -0,0 +1,35 @@ +## kw proxy + +Proxy cluster access to the target Kubernetes object. 
+ +``` +kw proxy [target] [flags] +``` + +### Options + +``` + -i, --agent-image string Agent image to use (default "ghcr.io/steved/kubewire:latest") + -c, --container string Name of the container to replace + -p, --direct Whether to try NAT hole punching (true) or use a load balancer for access to the pod + -h, --help help for proxy + -k, --keep-resources Keep created resources running when exiting (default true) + --kubeconfig string Kubernetes cfg file + --local-address text Local address accessible from remote agent + -n, --namespace string Namespace of the target object (default "default") + --node-cidr text Kubernetes node CIDR + -o, --overlay string Specify the overlay CIDR for Wireguard. Useful if auto-detection fails + --pod-cidr text Kubernetes pod CIDR + --service-cidr text Kubernetes Service CIDR +``` + +### Options inherited from parent commands + +``` + -d, --debug Toggle debug logging +``` + +### SEE ALSO + +* [kw](kw.md) - KubeWire allows easy, direct connections to, and through, a Kubernetes cluster. + diff --git a/examples/minikube/README.md b/examples/minikube/README.md new file mode 100644 index 0000000..ffc576c --- /dev/null +++ b/examples/minikube/README.md @@ -0,0 +1,15 @@ +## Quickstart + +To try out KubeWire locally, install [minikube](https://minikube.sigs.k8s.io/docs/start/) and get started: + +1. `minikube start` +2. `kubectl apply -f examples/minikube/*.yml` +3. Run kw: + ``` + $ ip=$(minikube ssh -- getent hosts host.minikube.internal | awk '{print $1}') + $ kw proxy deploy/hello-world --local-address "$ip:19070" --service-cidr 10.96.0.0/12 + ``` +4. Access the cluster! 
+ ``` + PGPASSWORD=mysupersecretpassword psql -h postgresql.default -p 5432 -U postgres -c 'create database testdb' + ``` diff --git a/examples/minikube/hello-world.yml b/examples/minikube/hello-world.yml new file mode 100644 index 0000000..cd33fc5 --- /dev/null +++ b/examples/minikube/hello-world.yml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Service +metadata: + name: hello-world + labels: + app.kubernetes.io/name: hello-world +spec: + type: ClusterIP + ports: + - name: http + port: 80 + targetPort: http + selector: + app.kubernetes.io/name: hello-world +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hello-world +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: hello-world + template: + metadata: + labels: + app.kubernetes.io/name: hello-world + spec: + containers: + - name: whoami + image: traefik/whoami + env: + - name: WHOAMI_PORT_NUMBER + value: "8080" + ports: + - name: http + containerPort: 8080 diff --git a/examples/minikube/postgresql.yml b/examples/minikube/postgresql.yml new file mode 100644 index 0000000..0093331 --- /dev/null +++ b/examples/minikube/postgresql.yml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: Service +metadata: + name: postgresql + labels: + app.kubernetes.io/name: postgresql +spec: + type: ClusterIP + ports: + - name: psql + port: 5432 + targetPort: psql + selector: + app.kubernetes.io/name: postgresql +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgresql +spec: + selector: + matchLabels: + app.kubernetes.io/name: postgresql + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: postgresql + spec: + containers: + - name: postgresql + image: postgres:16-alpine + env: + - name: POSTGRES_PASSWORD + value: mysupersecretpassword + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + ports: + - containerPort: 5432 + name: psql + volumeMounts: + - name: data + mountPath: /var/lib/postgresql/data + volumeClaimTemplates: + - metadata: + name: data + spec: + 
accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..72d792f --- /dev/null +++ b/go.mod @@ -0,0 +1,117 @@ +module github.com/steved/kubewire + +go 1.23.0 + +toolchain go1.23.1 + +require ( + github.com/aws/aws-sdk-go v1.55.5 + github.com/go-logr/zapr v1.3.0 + github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 + github.com/jsimonetti/rtnetlink v1.4.2 + github.com/pion/ice/v3 v3.0.16 + github.com/stretchr/testify v1.9.0 + github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc + go.uber.org/zap v1.27.0 + golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 + k8s.io/cli-runtime v0.31.1 + k8s.io/client-go v0.31.1 + tailscale.com v1.74.0 +) + +require ( + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect +) + +require ( + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/coreos/go-iptables v0.8.0 + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dblohm7/wingoes v0.0.0-20240820181039-f2b84150679e // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-errors/errors v1.5.1 // indirect + github.com/go-json-experiment/json v0.0.0-20240815175050-ebd3a8989ca1 // indirect + github.com/go-logr/logr v1.4.2 + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/go-cmp v0.6.0 // 
indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.6.0 + github.com/gorilla/websocket v1.5.3 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mdlayher/genetlink v1.3.2 // indirect + github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/socket v0.5.1 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/logging v0.2.2 // indirect + github.com/pion/mdns/v2 v2.0.7 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/stun/v2 v2.0.0 + github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect + github.com/pion/turn/v3 v3.0.3 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/spf13/cobra v1.8.1 + github.com/spf13/pflag v1.0.5 // indirect + 
github.com/wlynxg/anet v0.0.4 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + go.starlark.net v0.0.0-20240725214946-42030a7cedce // indirect + go.uber.org/multierr v1.11.0 // indirect + go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect + golang.org/x/crypto v0.27.0 + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.6.0 // indirect + golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect + golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173 // indirect + golang.zx2c4.com/wireguard/windows v0.5.3 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 + k8s.io/api v0.31.1 + k8s.io/apimachinery v0.31.1 + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect + k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.17.3 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..11041b8 --- /dev/null +++ b/go.sum @@ -0,0 +1,340 @@ +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/alexbrainman/sspi 
v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= +github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= +github.com/coreos/go-iptables v0.8.0 h1:MPc2P89IhuVpLI7ETL/2tx3XZ61VeICZjYqDEgNsPRc= +github.com/coreos/go-iptables v0.8.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= +github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dblohm7/wingoes v0.0.0-20240820181039-f2b84150679e h1:L+XrFvD0vBIBm+Wf9sFN6aU395t7JROoai0qXZraA4U= 
+github.com/dblohm7/wingoes v0.0.0-20240820181039-f2b84150679e/go.mod h1:SUxUaAK/0UG5lYyZR1L1nC4AaYYvSSYTWQSH3FPcxKU= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-json-experiment/json v0.0.0-20240815175050-ebd3a8989ca1 h1:xcuWappghOVI8iNWoF2OKahVejd1LSVi/v4JED44Amo= +github.com/go-json-experiment/json v0.0.0-20240815175050-ebd3a8989ca1/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= 
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= +github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 
h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= +github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= +github.com/jsimonetti/rtnetlink v1.4.2 
h1:Df9w9TZ3npHTyDn0Ev9e1uzmN2odmXd0QX+J5GTEn90= +github.com/jsimonetti/rtnetlink v1.4.2/go.mod h1:92s6LJdE+1iOrw+F2/RO7LYI2Qd8pPpFNNUYW06gcoM= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= +github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= +github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= 
+github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/peterbourgon/diskv v2.0.1+incompatible 
h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v3 v3.0.16 h1:YoPlNg3jU1UT/DDTa9v/g1vH6A2/pAzehevI1o66H8E= +github.com/pion/ice/v3 v3.0.16/go.mod h1:SdmubtIsCcvdb1ZInrTUz7Iaqi90/rYd1pzbzlMxsZg= +github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= +github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0= +github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v3 v3.0.3 h1:1e3GVk8gHZLPBA5LqadWYV60lmaKUaHCkm9DX9CkGcE= +github.com/pion/turn/v3 v3.0.3/go.mod 
h1:vw0Dz420q7VYAF3J4wJKzReLHIo2LGp4ev8nXQexYsc= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc h1:cezaQN9pvKVaw56Ma5qr/G646uKIYP0yQf+OyWN/okc= +github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw= +github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.starlark.net v0.0.0-20240725214946-42030a7cedce h1:YyGqCjZtGZJ+mRPaenEiB87afEO2MFRzLiJNZ0Z0bPw= +go.starlark.net v0.0.0-20240725214946-42030a7cedce/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak 
v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 
h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg= +golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= +golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173 h1:/jFs0duh4rdb8uIfPMv78iAJGcPKDeqAFnaLBropIC4= +golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173/go.mod h1:tkCQ4FQXmpAgYVh++1cq16/dH4QJtmvpRv19DWGAHSA= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 h1:CawjfCvYQH2OU3/TnxLx97WDSUDRABfT18pCOYwc2GE= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6/go.mod h1:3rxYc4HtVcSG9gVaTs2GEBdehh+sYPOwKtyUWEOTb80= +golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= +golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= +google.golang.org/protobuf v1.34.2 
h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk= +k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U= +k8s.io/client-go v0.31.1 
h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU= +sigs.k8s.io/kustomize/api v0.17.3/go.mod h1:TuDH4mdx7jTfK61SQ/j1QZM/QWR+5rmEiNjvYlhzFhc= +sigs.k8s.io/kustomize/kyaml v0.17.2 h1:+AzvoJUY0kq4QAhH/ydPHHMRLijtUKiyVyh7fOSshr0= +sigs.k8s.io/kustomize/kyaml v0.17.2/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +tailscale.com v1.74.0 h1:J+vRN9o3D4wCqZBiwvDg9kZpQag2mG4Xz5RXNpmV3KE= +tailscale.com v1.74.0/go.mod h1:3iACpCONQ4lauDXvwfoGlwNCpfbVxjdc2j6G9EuFOW8= diff --git a/main.go b/main.go new file mode 100644 index 0000000..6dbf875 --- /dev/null +++ b/main.go @@ -0,0 +1,11 @@ +package main + +import ( + _ "embed" + + "github.com/steved/kubewire/cmd" +) + +func main() { + cmd.Execute() +} diff --git 
a/pkg/agent/agent.go b/pkg/agent/agent.go new file mode 100644 index 0000000..08b126c --- /dev/null +++ b/pkg/agent/agent.go @@ -0,0 +1,151 @@ +package agent + +import ( + "context" + "fmt" + "net/netip" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/coreos/go-iptables/iptables" + "github.com/go-logr/logr" + "tailscale.com/net/netutil" + + "github.com/steved/kubewire/pkg/config" + "github.com/steved/kubewire/pkg/nat" + "github.com/steved/kubewire/pkg/routing" + "github.com/steved/kubewire/pkg/wg" +) + +var defaultInterface = func() (string, netip.Addr, error) { + return netutil.DefaultInterfacePortable() +} + +type iptablesManager interface { + AppendUnique(string, string, ...string) error + InsertUnique(string, string, int, ...string) error + ChainExists(string, string) (bool, error) +} + +func Run(ctx context.Context, cfg config.Wireguard, istioEnabled bool, proxyExcludedPorts []string) error { + log := logr.FromContextOrDiscard(ctx) + + var listenPort int + + if cfg.DirectAccess { + log.V(1).Info("Starting NAT address lookup") + + localHost, localPort, err := nat.FindLocalAddressAndPort(ctx) + if err != nil { + return fmt.Errorf("unable to proxy connectable address for NAT traversal: %w", err) + } + + listenPort = localPort + + localAddress := fmt.Sprintf("%s:%d", localHost, localPort) + if err := os.WriteFile(ContainerAddressPath, []byte(localAddress), 0600); err != nil { + return err + } + + log.Info("NAT address lookup complete", "address", localAddress) + } else if cfg.LocalAddress.IsValid() { + listenPort = -1 + } + + log.V(1).Info("Starting wireguard device setup") + + wireguardDevice := wg.NewWireguardDevice(wg.WireguardDeviceConfig{ + Peer: wg.WireguardDevicePeer{ + Endpoint: cfg.LocalAddress, + PublicKey: cfg.LocalKey.PublicKey(), + AllowedIPs: cfg.AllowedIPs, + }, + PrivateKey: cfg.AgentKey.Key, + ListenPort: listenPort, + Address: cfg.AgentOverlayAddress, + }) + + wgStop, err := wireguardDevice.Start(ctx) + if err != nil { + return err + 
} + + defer wgStop() + + log.Info("Wireguard device setup complete") + + log.V(1).Info("Starting route setup") + + router := routing.NewRouting(wireguardDevice.DeviceName(), netip.Addr{}, netip.PrefixFrom(cfg.LocalOverlayAddress, 32)) + + routerStop, err := router.Start(ctx) + if err != nil { + return err + } + + defer routerStop() + + log.Info("Routing setup complete") + + log.V(1).Info("Starting IPTables setup") + + ipt, err := iptables.New(iptables.IPFamily(iptables.ProtocolIPv4), iptables.Timeout(5)) + if err != nil { + return fmt.Errorf("unable to initialize iptables client: %w", err) + } + + if err := updateIPTablesRules(cfg, ipt, wireguardDevice.DeviceName(), istioEnabled, proxyExcludedPorts); err != nil { + return err + } + + log.Info("IPTables setup complete") + + log.Info("Started, waiting for signal") + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + <-sigCh + + return nil +} + +func updateIPTablesRules(cfg config.Wireguard, ipt iptablesManager, wireguardDeviceName string, istioEnabled bool, proxyExcludedPorts []string) error { + deviceName, deviceAddr, err := defaultInterface() + if err != nil { + return fmt.Errorf("unable to determine default device name: %w", err) + } + + if err := ipt.AppendUnique("nat", "POSTROUTING", "-p", "udp", "-o", deviceName, "-j", "MASQUERADE"); err != nil { + return fmt.Errorf("unable to create iptables rule: %w", err) + } + + defaultIfaceRulespec := []string{"-p", "tcp", "-i", deviceName} + + if len(proxyExcludedPorts) > 0 { + defaultIfaceRulespec = append(defaultIfaceRulespec, "-m", "multiport", "!", "--dports", strings.Join(proxyExcludedPorts, ",")) + } + + defaultIfaceRulespec = append(defaultIfaceRulespec, "-j", "DNAT", "--to-destination", cfg.LocalOverlayAddress.String()) + + if err := ipt.AppendUnique("nat", "PREROUTING", defaultIfaceRulespec...); err != nil { + return fmt.Errorf("unable to create iptables rule: %w", err) + } + + if istioEnabled { + if err := 
ipt.InsertUnique("nat", "PREROUTING", 1, "-p", "tcp", "-i", wireguardDeviceName, "-j", "DNAT", "--to-destination", "127.0.0.6:15001"); err != nil { + return fmt.Errorf("unable to create iptables rule: %w", err) + } + } else { + if err := ipt.AppendUnique("nat", "PREROUTING", "-p", "tcp", "-i", wireguardDeviceName, "--destination", deviceAddr.String(), "-j", "DNAT", "--to-destination", cfg.LocalOverlayAddress.String()); err != nil { + return fmt.Errorf("unable to create iptables rule: %w", err) + } + + if err := ipt.AppendUnique("nat", "POSTROUTING", "-p", "tcp", "-o", deviceName, "-j", "MASQUERADE"); err != nil { + return fmt.Errorf("unable to create iptables rule: %w", err) + } + } + + return nil +} diff --git a/pkg/agent/agent_test.go b/pkg/agent/agent_test.go new file mode 100644 index 0000000..7178af2 --- /dev/null +++ b/pkg/agent/agent_test.go @@ -0,0 +1,138 @@ +package agent + +import ( + "net/netip" + "reflect" + "slices" + "strings" + "testing" + + "github.com/steved/kubewire/pkg/config" +) + +type fakeIptables struct { + rules map[string]map[string][]string +} + +func (f *fakeIptables) AppendUnique(table string, chain string, rulespec ...string) error { + t, ok := f.rules[table] + if !ok { + t = make(map[string][]string) + f.rules[table] = t + } + + t[chain] = append(t[chain], strings.Join(rulespec, " ")) + + return nil +} + +func (f *fakeIptables) InsertUnique(table string, chain string, pos int, rulespec ...string) error { + f.rules[table][chain] = slices.Insert(f.rules[table][chain], pos, strings.Join(rulespec, " ")) + + return nil +} + +func (f *fakeIptables) ChainExists(table string, chain string) (bool, error) { + t, ok := f.rules[table] + if !ok { + return false, nil + } + + if _, ok := t[chain]; !ok { + return false, nil + } + + return true, nil +} + +func Test_updateIPTablesRules(t *testing.T) { + defaultInterface = func() (string, netip.Addr, error) { + return "eth0", netip.AddrFrom4([4]byte{100, 34, 56, 10}), nil + } + + cfg := 
config.Wireguard{LocalOverlayAddress: netip.MustParseAddr("10.1.0.1")} + + tests := []struct { + name string + istioEnabled bool + proxyExcludedPorts []string + existingRules map[string]map[string][]string + wantRules map[string]map[string][]string + wantErr bool + }{ + { + "basic", + false, + nil, + nil, + map[string]map[string][]string{ + "nat": { + "PREROUTING": { + "-p tcp -i eth0 -j DNAT --to-destination 10.1.0.1", + "-p tcp -i wg0 --destination 100.34.56.10 -j DNAT --to-destination 10.1.0.1", + }, + "POSTROUTING": { + "-p udp -o eth0 -j MASQUERADE", + "-p tcp -o eth0 -j MASQUERADE", + }, + }, + }, + false, + }, + { + "excluded ports", + false, + []string{"12345", "23456"}, + nil, + map[string]map[string][]string{ + "nat": { + "PREROUTING": { + "-p tcp -i eth0 -m multiport ! --dports 12345,23456 -j DNAT --to-destination 10.1.0.1", + "-p tcp -i wg0 --destination 100.34.56.10 -j DNAT --to-destination 10.1.0.1", + }, + "POSTROUTING": { + "-p udp -o eth0 -j MASQUERADE", + "-p tcp -o eth0 -j MASQUERADE", + }, + }, + }, + false, + }, + { + "istio", + true, + []string{"12345", "23456"}, + nil, + map[string]map[string][]string{ + "nat": { + "PREROUTING": { + "-p tcp -i eth0 -m multiport ! 
--dports 12345,23456 -j DNAT --to-destination 10.1.0.1", + "-p tcp -i wg0 -j DNAT --to-destination 127.0.0.6:15001", + }, + "POSTROUTING": { + "-p udp -o eth0 -j MASQUERADE", + }, + }, + }, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rules := tt.existingRules + if rules == nil { + rules = make(map[string]map[string][]string) + } + + f := &fakeIptables{rules: rules} + + if err := updateIPTablesRules(cfg, f, "wg0", tt.istioEnabled, tt.proxyExcludedPorts); (err != nil) != tt.wantErr { + t.Errorf("updateIPTablesRules() error = %v, wantErr %v", err, tt.wantErr) + } + + if !reflect.DeepEqual(f.rules, tt.wantRules) { + t.Errorf("updateIPTablesRules() rules = %v, expected %v", f.rules, tt.wantRules) + } + }) + } +} diff --git a/pkg/agent/kubernetes.go b/pkg/agent/kubernetes.go new file mode 100644 index 0000000..aa701d9 --- /dev/null +++ b/pkg/agent/kubernetes.go @@ -0,0 +1,473 @@ +package agent + +import ( + "context" + "fmt" + "net" + "net/netip" + "slices" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + "github.com/google/uuid" + "gopkg.in/yaml.v3" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + corev1apply "k8s.io/client-go/applyconfigurations/core/v1" + metav1apply "k8s.io/client-go/applyconfigurations/meta/v1" + netv1apply "k8s.io/client-go/applyconfigurations/networking/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + "k8s.io/utils/ptr" + + "github.com/steved/kubewire/pkg/config" + "github.com/steved/kubewire/pkg/kuberneteshelpers" + 
"github.com/steved/kubewire/pkg/runnable" + "github.com/steved/kubewire/pkg/wg" +) + +const ( + DefaultContainerAnnotationName = "kubectl.kubernetes.io/default-container" + FieldManager = "wireguard" + WireguardRevisionAnnotationName = "wgko.io/revision" + WaitTimeout = 5 * time.Minute + WireguardConfigVolumeName = "wireguard-config" + ContainerAddressPath = "/app/address" + ContainerName = "agent" +) + +type Agent interface { + runnable.Runnable + AgentAddress() netip.AddrPort +} + +type kubernetesAgent struct { + config *config.Config + client kubernetes.Interface + restConfig *rest.Config + + agentAddress netip.AddrPort +} + +func NewKubernetesAgent(config *config.Config, client kubernetes.Interface, restConfig *rest.Config) Agent { + return &kubernetesAgent{config: config, client: client, restConfig: restConfig} +} + +func (a *kubernetesAgent) AgentAddress() netip.AddrPort { + return a.agentAddress +} + +func (a *kubernetesAgent) Start(ctx context.Context) (runnable.StopFunc, error) { + var ( + matchLabels map[string]string + replaceContainerIndex int + revision = newRevision() + ) + + log := logr.FromContextOrDiscard(ctx) + accessor := meta.NewAccessor() + + objectName, err := accessor.Name(a.config.TargetObject) + if err != nil { + return nil, fmt.Errorf("unable to determine target object name: %w", err) + } + + relatedObjectName := wgObjectName(objectName) + + switch targetObject := a.config.TargetObject.(type) { + case *appsv1.Deployment: + matchLabels = targetObject.Spec.Selector.MatchLabels + + if targetObject.Spec.Template.Annotations == nil { + targetObject.Spec.Template.Annotations = make(map[string]string) + } + + targetObject.Spec.Template.Annotations[WireguardRevisionAnnotationName] = revision + targetObject.Spec.Replicas = ptr.To(int32(1)) + + replaceContainerIndex = containerIndexOrDefault(a.config.Container, targetObject.Spec.Template.Annotations, targetObject.Spec.Template.Spec.Containers) + if replaceContainerIndex == -1 { + return nil, 
fmt.Errorf("unable to find container to replace in target object %s/%s", targetObject.Namespace, targetObject.Name)
		}

		if err := a.applyConfig(ctx, a.config.Namespace, relatedObjectName); err != nil {
			return nil, fmt.Errorf("unable to create config: %w", err)
		}

		a.replaceContainerWithAgent(&targetObject.Spec.Template.Spec, relatedObjectName, replaceContainerIndex)

		_, err := a.client.AppsV1().Deployments(targetObject.Namespace).Update(ctx, targetObject, v1.UpdateOptions{})
		if err != nil {
			return nil, fmt.Errorf("failed to update target object %s/%s: %w", targetObject.Namespace, targetObject.Name, err)
		}
	case *appsv1.StatefulSet:
		// Same flow as the Deployment case above: pin replicas to 1, stamp the
		// pod template with the new revision annotation, swap the target
		// container for the agent, and push the update.
		matchLabels = targetObject.Spec.Selector.MatchLabels

		if targetObject.Spec.Template.Annotations == nil {
			targetObject.Spec.Template.Annotations = make(map[string]string)
		}

		targetObject.Spec.Template.Annotations[WireguardRevisionAnnotationName] = revision
		targetObject.Spec.Replicas = ptr.To(int32(1))

		replaceContainerIndex = containerIndexOrDefault(a.config.Container, targetObject.Spec.Template.Annotations, targetObject.Spec.Template.Spec.Containers)
		if replaceContainerIndex == -1 {
			return nil, fmt.Errorf("unable to find container to replace in target object %s/%s", targetObject.Namespace, targetObject.Name)
		}

		if err := a.applyConfig(ctx, a.config.Namespace, relatedObjectName); err != nil {
			return nil, fmt.Errorf("unable to create config: %w", err)
		}

		a.replaceContainerWithAgent(&targetObject.Spec.Template.Spec, relatedObjectName, replaceContainerIndex)

		_, err := a.client.AppsV1().StatefulSets(targetObject.Namespace).Update(ctx, targetObject, v1.UpdateOptions{})
		if err != nil {
			return nil, fmt.Errorf("failed to update target object %s/%s: %w", targetObject.Namespace, targetObject.Name, err)
		}
	default:
		// BUG FIX: was %t, which is fmt's boolean verb and would render as
		// "%!t(...)". %T prints the dynamic type (e.g. "*v1.DaemonSet"),
		// which is what this diagnostic intends.
		return nil, fmt.Errorf("target object is not a supported type: %T", targetObject)
	}

	if a.config.Wireguard.DirectAccess {
		address, err := waitForPod(ctx,
a.client.CoreV1().RESTClient(), a.restConfig, a.config.Namespace, matchLabels, revision) + if err != nil { + return nil, fmt.Errorf("failed to find new pod for %s/%s: %w", a.config.Namespace, objectName, err) + } + + if err := a.applyNetworkPolicy(ctx, a.config.Namespace, relatedObjectName, matchLabels, int32(address.Port())); err != nil { + return nil, fmt.Errorf("failed to create network policy for %s/%s: %w", a.config.Namespace, objectName, err) + } + + a.agentAddress = address + } else if !a.config.Wireguard.LocalAddress.IsValid() { + address, err := a.applyLoadbalancer(ctx, a.config.Namespace, relatedObjectName, matchLabels) + if err != nil { + return nil, fmt.Errorf("failed to create load balancer service for %s/%s: %w", a.config.Namespace, objectName, err) + } + + if err := a.applyNetworkPolicy(ctx, a.config.Namespace, relatedObjectName, matchLabels, int32(wg.DefaultWireguardPort)); err != nil { + return nil, fmt.Errorf("failed to create network policy for %s/%s: %w", a.config.Namespace, objectName, err) + } + + a.agentAddress = address + } + + return func() { + if a.config.KeepResources { + return + } + + if err := a.client.CoreV1().Services(a.config.Namespace).Delete(ctx, relatedObjectName, v1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { + log.Error(err, "unable to delete service", "name", relatedObjectName) + } + + if err := a.client.NetworkingV1().NetworkPolicies(a.config.Namespace).Delete(ctx, relatedObjectName, v1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { + log.Error(err, "unable to delete netpol", "name", relatedObjectName) + } + + if err := a.client.CoreV1().Secrets(a.config.Namespace).Delete(ctx, relatedObjectName, v1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { + log.Error(err, "unable to delete secret", "name", relatedObjectName) + } + }, nil +} + +func (a *kubernetesAgent) applyConfig(ctx context.Context, namespace, configName string) error { + cfg, err := yaml.Marshal(a.config.Wireguard) + if err != nil 
{ + return fmt.Errorf("unable to marshal wireguard config to YAML: %w", err) + } + + secret := corev1apply.Secret(configName, namespace).WithData(map[string][]byte{"wg.yml": cfg}) + _, err = a.client.CoreV1().Secrets(namespace).Apply(ctx, secret, v1.ApplyOptions{FieldManager: FieldManager}) + + return err +} + +func (a *kubernetesAgent) replaceContainerWithAgent(podSpec *corev1.PodSpec, configName string, containerIndex int) { + var excludePorts []string + + // Remove liveness probes in case they're checking the container we're + // replacing; if the proxy service isn't up yet these would fail. + for index, container := range podSpec.Containers { + podSpec.Containers[index].LivenessProbe = nil + podSpec.Containers[index].ReadinessProbe = nil + podSpec.Containers[index].StartupProbe = nil + + if index != containerIndex { + // Add other listeners to allow direct access without proxying through wireguard + for _, port := range container.Ports { + excludePorts = append(excludePorts, strconv.FormatInt(int64(port.ContainerPort), 10)) + } + } + } + + replacedContainer := podSpec.Containers[containerIndex] + podSpec.Containers[containerIndex] = corev1.Container{ + Name: ContainerName, + Image: a.config.AgentImage, + ImagePullPolicy: corev1.PullAlways, + // Retain ports in case they're named at the service level + Ports: replacedContainer.Ports, + Env: []corev1.EnvVar{ + { + Name: "LOCAL_PORTS_EXCLUDE_PROXY", + Value: strings.Join(excludePorts, ","), + }, + { + Name: "ISTIO_INTERCEPTION_MODE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations['sidecar.istio.io/interceptionMode']", + }, + }, + }, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}, + RunAsUser: ptr.To(int64(0)), + RunAsGroup: ptr.To(int64(0)), + RunAsNonRoot: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(false), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: 
WireguardConfigVolumeName, + ReadOnly: true, + MountPath: "/app/config", + }, + }, + } + + volume := corev1.Volume{ + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: configName, + Optional: ptr.To(false), + }, + }, + } + + volumeIndex := slices.IndexFunc(podSpec.Volumes, func(v corev1.Volume) bool { return v.Name == WireguardConfigVolumeName }) + if volumeIndex == -1 { + podSpec.Volumes = append(podSpec.Volumes, volume) + } else { + podSpec.Volumes[volumeIndex] = volume + } +} + +func (a *kubernetesAgent) applyLoadbalancer(ctx context.Context, namespace, name string, selector map[string]string) (netip.AddrPort, error) { + log := logr.FromContextOrDiscard(ctx) + + annotations := map[string]string{ + // AWS + "service.beta.kubernetes.io/aws-load-balancer-backend-protocol": "tcp", + "service.beta.kubernetes.io/aws-load-balancer-internal": "false", + "service.beta.kubernetes.io/aws-load-balancer-type": "nlb", + "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled": "true", + // GCP + "cloud.google.com/l4-rbs": "enabled", + // No Azure annotations necessary + } + + service := corev1apply.Service(name, namespace). + WithAnnotations(annotations). 
+ WithSpec(&corev1apply.ServiceSpecApplyConfiguration{ + Ports: []corev1apply.ServicePortApplyConfiguration{{ + Name: ptr.To("wireguard"), + Protocol: ptr.To(corev1.ProtocolUDP), + Port: ptr.To(int32(wg.DefaultWireguardPort)), + TargetPort: ptr.To(intstr.FromInt32(wg.DefaultWireguardPort)), + }}, + Selector: selector, + Type: ptr.To(corev1.ServiceTypeLoadBalancer), + ExternalTrafficPolicy: ptr.To(corev1.ServiceExternalTrafficPolicyLocal), + InternalTrafficPolicy: ptr.To(corev1.ServiceInternalTrafficPolicyLocal), + }) + + _, err := a.client.CoreV1().Services(namespace).Apply(ctx, service, v1.ApplyOptions{FieldManager: FieldManager}) + if err != nil { + return netip.AddrPort{}, fmt.Errorf("unable to apply service: %w", err) + } + + svc, err := waitForLoadBalancerReady(ctx, a.client.CoreV1().RESTClient(), namespace, name) + if err != nil { + return netip.AddrPort{}, fmt.Errorf("timeout after %s waiting for load balancer service to be ready: %w", WaitTimeout.String(), err) + } + + ing := svc.Status.LoadBalancer.Ingress[0] + + if ing.Hostname != "" { + var ip netip.Addr + + resolveCtx, cancel := context.WithTimeout(ctx, WaitTimeout) + defer cancel() + + log.Info("Load balancer ready, waiting for DNS to resolve", "hostname", ing.Hostname) + + err = wait.PollUntilContextCancel(resolveCtx, 5*time.Second, true, func(ctx context.Context) (bool, error) { + ips, _ := net.DefaultResolver.LookupIP(ctx, "ip4", ing.Hostname) + if len(ips) == 0 { + return false, nil + } + + ip = netip.AddrFrom4([4]byte(ips[0])) + + return true, nil + }) + + if err != nil { + return netip.AddrPort{}, fmt.Errorf("unable to lookup IP for load balancer hostname %q: %w", ing.Hostname, err) + } + + return netip.AddrPortFrom(ip, wg.DefaultWireguardPort), nil + } else if ing.IP != "" { + ip, err := netip.ParseAddr(ing.IP) + if err != nil { + return netip.AddrPort{}, fmt.Errorf("unable to parse IP for load balancer IP %q: %w", ing.IP, err) + } + + return netip.AddrPortFrom(ip, wg.DefaultWireguardPort), nil 
+ } + + return netip.AddrPort{}, fmt.Errorf("unable to find load balancer address for service %q", svc.Name) +} + +func (a *kubernetesAgent) applyNetworkPolicy(ctx context.Context, namespace, name string, selector map[string]string, port int32) error { + netpol := netv1apply.NetworkPolicy(name, namespace). + WithSpec(&netv1apply.NetworkPolicySpecApplyConfiguration{ + PodSelector: &metav1apply.LabelSelectorApplyConfiguration{MatchLabels: selector}, + Ingress: []netv1apply.NetworkPolicyIngressRuleApplyConfiguration{{ + Ports: []netv1apply.NetworkPolicyPortApplyConfiguration{{ + Protocol: ptr.To(corev1.ProtocolUDP), + Port: ptr.To(intstr.FromInt32(port)), + }}, + }}, + PolicyTypes: []netv1.PolicyType{netv1.PolicyTypeIngress}, + }) + + _, err := a.client.NetworkingV1().NetworkPolicies(namespace).Apply(ctx, netpol, v1.ApplyOptions{FieldManager: FieldManager}) + + return err +} + +func wgObjectName(name string) string { + return fmt.Sprintf("wg-%s", name) +} + +func containerIndexOrDefault(containerName string, annotations map[string]string, containers []corev1.Container) int { + if len(containers) == 0 { + return -1 + } + + if containerName == "" { + if name := annotations[DefaultContainerAnnotationName]; len(name) > 0 { + containerName = name + } else { + containerName = containers[0].Name + } + } + + return slices.IndexFunc(containers, func(container corev1.Container) bool { return container.Name == containerName }) +} + +func podReady(c corev1.PodCondition) bool { + return c.Status == corev1.ConditionTrue && c.Type == corev1.PodReady +} + +var waitForPod = func(ctx context.Context, client cache.Getter, restConfig *rest.Config, namespace string, matchLabels map[string]string, revision string) (address netip.AddrPort, err error) { + log := logr.FromContextOrDiscard(ctx) + + lw := cache.NewFilteredListWatchFromClient(client, "pods", namespace, func(o *v1.ListOptions) { + o.LabelSelector = labels.SelectorFromSet(matchLabels).String() + }) + + deadlineCtx, cancel := 
context.WithTimeout(ctx, WaitTimeout) + defer cancel() + + log.Info("Waiting for new pod to by ready", "revision", revision) + + sync, syncErr := watchtools.UntilWithSync(deadlineCtx, lw, &corev1.Pod{}, nil, func(event watch.Event) (bool, error) { + pod := event.Object.(*corev1.Pod) + + return pod.Annotations[WireguardRevisionAnnotationName] == revision && slices.ContainsFunc(pod.Status.Conditions, podReady), nil + }) + if syncErr != nil { + err = fmt.Errorf("timeout after %s waiting for pod to be ready: %w", WaitTimeout.String(), syncErr) + return + } + + pod := sync.Object.(*corev1.Pod) + log = log.WithValues("pod", pod.Name) + + log.Info("Waiting for pod remote address", "pod", pod.Name) + + pollErr := wait.PollUntilContextCancel(deadlineCtx, 5*time.Second, true, func(ctx context.Context) (bool, error) { + contents, err := kuberneteshelpers.FileContents(ctx, restConfig, pod, ContainerName, ContainerAddressPath) + if err != nil { + log.V(1).Info("unable to read address from pod", "error", err.Error()) + return false, nil + } + + podAddress := strings.Split(contents, "\n")[0] + if podAddress == "" { + log.V(1).Info("unable to read address from pod", "error", err.Error()) + return false, nil + } + + address, err = netip.ParseAddrPort(podAddress) + if err != nil { + log.V(1).Info("unable to read address from pod", "error", err.Error()) + return false, nil + } + + return true, nil + }) + if pollErr != nil { + err = fmt.Errorf("timeout after %s waiting for pod address: %w", WaitTimeout.String(), pollErr) + } + + return +} + +var waitForLoadBalancerReady = func(ctx context.Context, client cache.Getter, namespace, name string) (*corev1.Service, error) { + log := logr.FromContextOrDiscard(ctx) + + log.Info("Waiting for load balancer to be ready", "service", name, "namespace", namespace) + + lw := cache.NewListWatchFromClient(client, "services", namespace, fields.OneTermEqualSelector("metadata.name", name)) + + deadlineCtx, cancel := context.WithTimeout(ctx, WaitTimeout) 
+ defer cancel() + + sync, err := watchtools.UntilWithSync(deadlineCtx, lw, &corev1.Service{}, nil, func(event watch.Event) (bool, error) { + svc := event.Object.(*corev1.Service) + if len(svc.Status.LoadBalancer.Ingress) > 0 { + return true, nil + } + + return false, nil + }) + + if err != nil { + return nil, err + } + + return sync.Object.(*corev1.Service), nil +} + +var newRevision = func() string { return uuid.New().String() } diff --git a/pkg/agent/kubernetes_test.go b/pkg/agent/kubernetes_test.go new file mode 100644 index 0000000..4c38037 --- /dev/null +++ b/pkg/agent/kubernetes_test.go @@ -0,0 +1,1163 @@ +package agent + +import ( + "context" + "fmt" + "net/netip" + "testing" + + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v3" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/utils/ptr" + + "github.com/steved/kubewire/pkg/config" + "github.com/steved/kubewire/pkg/runnable" + "github.com/steved/kubewire/pkg/wg" +) + +var ( + agentImage = "agent-image" + namespace = "test-namespace" + objectName = "test-object" + relatedObjectName = fmt.Sprintf("wg-%s", objectName) + selector = map[string]string{"app.kubernetes.io/name": objectName} + agentAddr = netip.MustParseAddrPort("4.5.6.7:19017") +) + +func testAgent(t *testing.T, obj runtime.Object, cfg *config.Config, f func(t *testing.T, stop runnable.StopFunc, client kubernetes.Interface, remoteAddr netip.AddrPort)) { + newRevision = func() string { return "1-2-3-4" } + + waitForLoadBalancerReady = func(_ context.Context, _ cache.Getter, namespace, name string) (*corev1.Service, error) { + return &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Name: name, + 
Namespace: namespace, + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{{Hostname: "example.com"}}, + }, + }, + }, nil + } + + waitForPod = func(_ context.Context, _ cache.Getter, _ *rest.Config, _ string, _ map[string]string, _ string) (address netip.AddrPort, err error) { + return agentAddr, nil + } + + cfg.TargetObject = obj + cfg.Namespace = namespace + cfg.AgentImage = agentImage + + client := fake.NewClientset(obj) + a := NewKubernetesAgent(cfg, client, nil) + stop, err := a.Start(context.Background()) + + assert.NoError(t, err) + + f(t, stop, client, a.AgentAddress()) +} + +func TestAgentDeployment(t *testing.T) { + deployment := &appsv1.Deployment{ + ObjectMeta: v1.ObjectMeta{Name: objectName, Namespace: namespace}, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(3)), + Selector: &v1.LabelSelector{ + MatchLabels: selector, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "test-container", Image: "test-image"}, + }, + }, + }, + }, + } + + t.Run("deployment", func(t *testing.T) { + testAgent(t, deployment.DeepCopy(), config.NewConfig(), func(t *testing.T, _ runnable.StopFunc, client kubernetes.Interface, remoteAddr netip.AddrPort) { + assert.True(t, remoteAddr.IsValid(), "failed to return a valid remote address") + + service, err := client.CoreV1().Services(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-backend-protocol": "tcp", + "service.beta.kubernetes.io/aws-load-balancer-internal": "false", + "service.beta.kubernetes.io/aws-load-balancer-type": "nlb", + "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled": "true", + "cloud.google.com/l4-rbs": "enabled", + }, + service.ObjectMeta.Annotations) + + assert.Equal( + t, + 
corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: "wireguard", + Protocol: corev1.ProtocolUDP, + Port: int32(wg.DefaultWireguardPort), + TargetPort: intstr.FromInt32(wg.DefaultWireguardPort), + }}, + Selector: selector, + Type: corev1.ServiceTypeLoadBalancer, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyLocal, + InternalTrafficPolicy: ptr.To(corev1.ServiceInternalTrafficPolicyLocal), + }, + service.Spec, + ) + } + + deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), objectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + appsv1.DeploymentSpec{ + Selector: &v1.LabelSelector{MatchLabels: selector}, + Replicas: ptr.To(int32(1)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + WireguardRevisionAnnotationName: "1-2-3-4", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: ContainerName, + Image: agentImage, + ImagePullPolicy: corev1.PullAlways, + // Retain ports in case they're named at the service level + Ports: nil, + Env: []corev1.EnvVar{ + { + Name: "LOCAL_PORTS_EXCLUDE_PROXY", + Value: "", + }, + { + Name: "ISTIO_INTERCEPTION_MODE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations['sidecar.istio.io/interceptionMode']", + }, + }, + }, + }, + + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}, + RunAsUser: ptr.To(int64(0)), + RunAsGroup: ptr.To(int64(0)), + RunAsNonRoot: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(false), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: WireguardConfigVolumeName, + ReadOnly: true, + MountPath: "/app/config", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: relatedObjectName, + Optional: ptr.To(false), + }, + 
}, + }, + }, + }, + }, + }, + deployment.Spec, + ) + } + + netpol, err := client.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{MatchLabels: selector}, + Ingress: []networkingv1.NetworkPolicyIngressRule{{ + Ports: []networkingv1.NetworkPolicyPort{{ + Protocol: ptr.To(corev1.ProtocolUDP), + Port: ptr.To(intstr.FromInt32(19070)), + }}, + }}, + PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress}, + }, + netpol.Spec, + ) + } + + secret, err := client.CoreV1().Secrets(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + err = yaml.Unmarshal(secret.Data["wg.yml"], &config.Wireguard{}) + assert.NoError(t, err) + } + }) + }) + + t.Run("deployment with multiple containers", func(t *testing.T) { + testDeployment := deployment.DeepCopy() + testDeployment.Spec.Template.Annotations = map[string]string{DefaultContainerAnnotationName: "test-container"} + testDeployment.Spec.Template.Spec.Containers = []corev1.Container{ + {Name: "istio", Ports: []corev1.ContainerPort{{Name: "istio-proxy", ContainerPort: 15001}}}, + {Name: "test-container", Image: "test-image"}, + { + Name: "other-container", + LivenessProbe: &corev1.Probe{InitialDelaySeconds: 10}, + ReadinessProbe: &corev1.Probe{InitialDelaySeconds: 11}, + StartupProbe: &corev1.Probe{InitialDelaySeconds: 12}, + Ports: []corev1.ContainerPort{{Name: "other", ContainerPort: 12345}}, + }, + } + + testAgent(t, testDeployment, config.NewConfig(), func(t *testing.T, _ runnable.StopFunc, client kubernetes.Interface, _ netip.AddrPort) { + deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), objectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + appsv1.DeploymentSpec{ + Selector: &v1.LabelSelector{MatchLabels: selector}, + Replicas: 
ptr.To(int32(1)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + WireguardRevisionAnnotationName: "1-2-3-4", + DefaultContainerAnnotationName: "test-container", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "istio", Ports: []corev1.ContainerPort{{Name: "istio-proxy", ContainerPort: 15001}}}, + { + Name: ContainerName, + Image: agentImage, + ImagePullPolicy: corev1.PullAlways, + // Retain ports in case they're named at the service level + Ports: nil, + Env: []corev1.EnvVar{ + { + Name: "LOCAL_PORTS_EXCLUDE_PROXY", + Value: "15001,12345", + }, + { + Name: "ISTIO_INTERCEPTION_MODE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations['sidecar.istio.io/interceptionMode']", + }, + }, + }, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}, + RunAsUser: ptr.To(int64(0)), + RunAsGroup: ptr.To(int64(0)), + RunAsNonRoot: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(false), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: WireguardConfigVolumeName, + ReadOnly: true, + MountPath: "/app/config", + }, + }, + }, + { + Name: "other-container", + Ports: []corev1.ContainerPort{{Name: "other", ContainerPort: 12345}}, + }, + }, + Volumes: []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: relatedObjectName, + Optional: ptr.To(false), + }, + }, + }, + }, + }, + }, + }, + deployment.Spec, + ) + } + }) + }) + + t.Run("deployment with existing volume", func(t *testing.T) { + testDeployment := deployment.DeepCopy() + testDeployment.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "different-secret-name", + Optional: ptr.To(true), + }, + }, + }, + } + 
+ testAgent(t, testDeployment, config.NewConfig(), func(t *testing.T, _ runnable.StopFunc, client kubernetes.Interface, _ netip.AddrPort) { + deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), objectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: relatedObjectName, + Optional: ptr.To(false), + }, + }, + }, + }, + deployment.Spec.Template.Spec.Volumes, + ) + } + }) + }) + + t.Run("deployment with direct", func(t *testing.T) { + cfg := config.NewConfig() + cfg.Wireguard.DirectAccess = true + + testAgent(t, deployment, cfg, func(t *testing.T, _ runnable.StopFunc, client kubernetes.Interface, remoteAddr netip.AddrPort) { + assert.Equal(t, agentAddr, remoteAddr) + + _, err := client.CoreV1().Services(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + + deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), objectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + appsv1.DeploymentSpec{ + Selector: &v1.LabelSelector{MatchLabels: selector}, + Replicas: ptr.To(int32(1)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + WireguardRevisionAnnotationName: "1-2-3-4", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: ContainerName, + Image: agentImage, + ImagePullPolicy: corev1.PullAlways, + // Retain ports in case they're named at the service level + Ports: nil, + Env: []corev1.EnvVar{ + { + Name: "LOCAL_PORTS_EXCLUDE_PROXY", + Value: "", + }, + { + Name: "ISTIO_INTERCEPTION_MODE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations['sidecar.istio.io/interceptionMode']", + }, + }, + }, + }, + SecurityContext: 
&corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}, + RunAsUser: ptr.To(int64(0)), + RunAsGroup: ptr.To(int64(0)), + RunAsNonRoot: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(false), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: WireguardConfigVolumeName, + ReadOnly: true, + MountPath: "/app/config", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: relatedObjectName, + Optional: ptr.To(false), + }, + }, + }, + }, + }, + }, + }, + deployment.Spec, + ) + } + + netpol, err := client.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{MatchLabels: selector}, + Ingress: []networkingv1.NetworkPolicyIngressRule{{ + Ports: []networkingv1.NetworkPolicyPort{{ + Protocol: ptr.To(corev1.ProtocolUDP), + Port: ptr.To(intstr.FromInt32(19017)), + }}, + }}, + PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress}, + }, + netpol.Spec, + ) + } + + secret, err := client.CoreV1().Secrets(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + err = yaml.Unmarshal(secret.Data["wg.yml"], &config.Wireguard{}) + assert.NoError(t, err) + } + }) + }) + + t.Run("deployment with local address", func(t *testing.T) { + cfg := config.NewConfig() + cfg.Wireguard.LocalAddress = netip.MustParseAddrPort("7.6.5.4:33010") + + testAgent(t, deployment, cfg, func(t *testing.T, _ runnable.StopFunc, client kubernetes.Interface, remoteAddr netip.AddrPort) { + assert.True(t, !remoteAddr.IsValid(), "agent address expected to be nil") + + _, err := client.CoreV1().Services(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + + _, err 
= client.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + + deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), objectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + appsv1.DeploymentSpec{ + Selector: &v1.LabelSelector{MatchLabels: selector}, + Replicas: ptr.To(int32(1)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + WireguardRevisionAnnotationName: "1-2-3-4", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: ContainerName, + Image: agentImage, + ImagePullPolicy: corev1.PullAlways, + // Retain ports in case they're named at the service level + Ports: nil, + Env: []corev1.EnvVar{ + { + Name: "LOCAL_PORTS_EXCLUDE_PROXY", + Value: "", + }, + { + Name: "ISTIO_INTERCEPTION_MODE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations['sidecar.istio.io/interceptionMode']", + }, + }, + }, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}, + RunAsUser: ptr.To(int64(0)), + RunAsGroup: ptr.To(int64(0)), + RunAsNonRoot: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(false), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: WireguardConfigVolumeName, + ReadOnly: true, + MountPath: "/app/config", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: relatedObjectName, + Optional: ptr.To(false), + }, + }, + }, + }, + }, + }, + }, + deployment.Spec, + ) + } + + secret, err := client.CoreV1().Secrets(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + err = yaml.Unmarshal(secret.Data["wg.yml"], &config.Wireguard{}) + assert.NoError(t, 
err) + } + }) + }) + + t.Run("deployment down", func(t *testing.T) { + cfg := config.NewConfig() + + testAgent(t, deployment, cfg, func(t *testing.T, stop runnable.StopFunc, client kubernetes.Interface, _ netip.AddrPort) { + cfg.KeepResources = true + + stop() + + _, err := client.CoreV1().Services(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.NoError(t, err) + + _, err = client.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.NoError(t, err) + + _, err = client.CoreV1().Secrets(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.NoError(t, err) + + cfg.KeepResources = false + + stop() + + _, err = client.CoreV1().Services(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + + _, err = client.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + + _, err = client.CoreV1().Secrets(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + }) + }) +} + +func TestAgentStatefulset(t *testing.T) { + statefulset := &appsv1.StatefulSet{ + ObjectMeta: v1.ObjectMeta{Name: objectName, Namespace: namespace}, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To(int32(3)), + Selector: &v1.LabelSelector{ + MatchLabels: selector, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "test-container", Image: "test-image"}, + }, + }, + }, + }, + } + + t.Run("statefulset", func(t *testing.T) { + testAgent(t, statefulset.DeepCopy(), config.NewConfig(), func(t *testing.T, _ runnable.StopFunc, client kubernetes.Interface, remoteAddr netip.AddrPort) { + assert.True(t, remoteAddr.IsValid(), "failed to return a valid remote address") + + service, err := 
client.CoreV1().Services(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + map[string]string{ + "service.beta.kubernetes.io/aws-load-balancer-backend-protocol": "tcp", + "service.beta.kubernetes.io/aws-load-balancer-internal": "false", + "service.beta.kubernetes.io/aws-load-balancer-type": "nlb", + "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled": "true", + "cloud.google.com/l4-rbs": "enabled", + }, + service.ObjectMeta.Annotations) + + assert.Equal( + t, + corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: "wireguard", + Protocol: corev1.ProtocolUDP, + Port: int32(wg.DefaultWireguardPort), + TargetPort: intstr.FromInt32(wg.DefaultWireguardPort), + }}, + Selector: selector, + Type: corev1.ServiceTypeLoadBalancer, + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyLocal, + InternalTrafficPolicy: ptr.To(corev1.ServiceInternalTrafficPolicyLocal), + }, + service.Spec, + ) + } + + sts, err := client.AppsV1().StatefulSets(namespace).Get(context.Background(), objectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + appsv1.StatefulSetSpec{ + Selector: &v1.LabelSelector{MatchLabels: selector}, + Replicas: ptr.To(int32(1)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + WireguardRevisionAnnotationName: "1-2-3-4", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: ContainerName, + Image: agentImage, + ImagePullPolicy: corev1.PullAlways, + // Retain ports in case they're named at the service level + Ports: nil, + Env: []corev1.EnvVar{ + { + Name: "LOCAL_PORTS_EXCLUDE_PROXY", + Value: "", + }, + + { + Name: "ISTIO_INTERCEPTION_MODE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations['sidecar.istio.io/interceptionMode']", + }, + }, + }, + }, + SecurityContext: &corev1.SecurityContext{ + 
Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}, + RunAsUser: ptr.To(int64(0)), + RunAsGroup: ptr.To(int64(0)), + RunAsNonRoot: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(false), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: WireguardConfigVolumeName, + ReadOnly: true, + MountPath: "/app/config", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: relatedObjectName, + Optional: ptr.To(false), + }, + }, + }, + }, + }, + }, + }, + sts.Spec, + ) + } + + netpol, err := client.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{MatchLabels: selector}, + Ingress: []networkingv1.NetworkPolicyIngressRule{{ + Ports: []networkingv1.NetworkPolicyPort{{ + Protocol: ptr.To(corev1.ProtocolUDP), + Port: ptr.To(intstr.FromInt32(19070)), + }}, + }}, + PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress}, + }, + netpol.Spec, + ) + } + + secret, err := client.CoreV1().Secrets(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + err = yaml.Unmarshal(secret.Data["wg.yml"], &config.Wireguard{}) + assert.NoError(t, err) + } + }) + }) + + t.Run("statefulset with multiple containers", func(t *testing.T) { + testStatefulset := statefulset.DeepCopy() + testStatefulset.Spec.Template.Annotations = map[string]string{DefaultContainerAnnotationName: "test-container"} + testStatefulset.Spec.Template.Spec.Containers = []corev1.Container{ + {Name: "istio", Ports: []corev1.ContainerPort{{Name: "istio-proxy", ContainerPort: 15001}}}, + {Name: "test-container", Image: "test-image"}, + { + Name: "other-container", + LivenessProbe: &corev1.Probe{InitialDelaySeconds: 10}, + ReadinessProbe: 
&corev1.Probe{InitialDelaySeconds: 11}, + StartupProbe: &corev1.Probe{InitialDelaySeconds: 12}, + Ports: []corev1.ContainerPort{{Name: "other", ContainerPort: 12345}}, + }, + } + + testAgent(t, testStatefulset, config.NewConfig(), func(t *testing.T, _ runnable.StopFunc, client kubernetes.Interface, _ netip.AddrPort) { + sts, err := client.AppsV1().StatefulSets(namespace).Get(context.Background(), objectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + appsv1.StatefulSetSpec{ + Selector: &v1.LabelSelector{MatchLabels: selector}, + Replicas: ptr.To(int32(1)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + WireguardRevisionAnnotationName: "1-2-3-4", + DefaultContainerAnnotationName: "test-container", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "istio", Ports: []corev1.ContainerPort{{Name: "istio-proxy", ContainerPort: 15001}}}, + { + Name: ContainerName, + Image: agentImage, + ImagePullPolicy: corev1.PullAlways, + // Retain ports in case they're named at the service level + Ports: nil, + Env: []corev1.EnvVar{ + { + Name: "LOCAL_PORTS_EXCLUDE_PROXY", + Value: "15001,12345", + }, + { + Name: "ISTIO_INTERCEPTION_MODE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations['sidecar.istio.io/interceptionMode']", + }, + }, + }, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}, + RunAsUser: ptr.To(int64(0)), + RunAsGroup: ptr.To(int64(0)), + RunAsNonRoot: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(false), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: WireguardConfigVolumeName, + ReadOnly: true, + MountPath: "/app/config", + }, + }, + }, + { + Name: "other-container", + Ports: []corev1.ContainerPort{{Name: "other", ContainerPort: 12345}}, + }, + }, + Volumes: []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + 
VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: relatedObjectName, + Optional: ptr.To(false), + }, + }, + }, + }, + }, + }, + }, + sts.Spec, + ) + } + }) + }) + + t.Run("deployment with existing volume", func(t *testing.T) { + testStatefulset := statefulset.DeepCopy() + testStatefulset.Spec.Template.Spec.Volumes = []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "different-secret-name", + Optional: ptr.To(true), + }, + }, + }, + } + + testAgent(t, testStatefulset, config.NewConfig(), func(t *testing.T, _ runnable.StopFunc, client kubernetes.Interface, _ netip.AddrPort) { + sts, err := client.AppsV1().StatefulSets(namespace).Get(context.Background(), objectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: relatedObjectName, + Optional: ptr.To(false), + }, + }, + }, + }, + sts.Spec.Template.Spec.Volumes, + ) + } + }) + }) + + t.Run("statefulset with direct", func(t *testing.T) { + cfg := config.NewConfig() + cfg.Wireguard.DirectAccess = true + + testAgent(t, statefulset, cfg, func(t *testing.T, _ runnable.StopFunc, client kubernetes.Interface, remoteAddr netip.AddrPort) { + assert.Equal(t, agentAddr, remoteAddr) + + _, err := client.CoreV1().Services(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + + sts, err := client.AppsV1().StatefulSets(namespace).Get(context.Background(), objectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + appsv1.StatefulSetSpec{ + Selector: &v1.LabelSelector{MatchLabels: selector}, + Replicas: ptr.To(int32(1)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + WireguardRevisionAnnotationName: 
"1-2-3-4", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: ContainerName, + Image: agentImage, + ImagePullPolicy: corev1.PullAlways, + // Retain ports in case they're named at the service level + Ports: nil, + Env: []corev1.EnvVar{ + { + Name: "LOCAL_PORTS_EXCLUDE_PROXY", + Value: "", + }, + { + Name: "ISTIO_INTERCEPTION_MODE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations['sidecar.istio.io/interceptionMode']", + }, + }, + }, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}, + RunAsUser: ptr.To(int64(0)), + RunAsGroup: ptr.To(int64(0)), + RunAsNonRoot: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(false), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: WireguardConfigVolumeName, + ReadOnly: true, + MountPath: "/app/config", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: relatedObjectName, + Optional: ptr.To(false), + }, + }, + }, + }, + }, + }, + }, + sts.Spec, + ) + } + + netpol, err := client.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{MatchLabels: selector}, + Ingress: []networkingv1.NetworkPolicyIngressRule{{ + Ports: []networkingv1.NetworkPolicyPort{{ + Protocol: ptr.To(corev1.ProtocolUDP), + Port: ptr.To(intstr.FromInt32(19017)), + }}, + }}, + PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress}, + }, + netpol.Spec, + ) + } + + secret, err := client.CoreV1().Secrets(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + err = yaml.Unmarshal(secret.Data["wg.yml"], &config.Wireguard{}) + assert.NoError(t, err) + } + 
}) + }) + + t.Run("statefulset with local address", func(t *testing.T) { + cfg := config.NewConfig() + cfg.Wireguard.LocalAddress = netip.MustParseAddrPort("7.6.5.4:33010") + + testAgent(t, statefulset, cfg, func(t *testing.T, _ runnable.StopFunc, client kubernetes.Interface, remoteAddr netip.AddrPort) { + assert.True(t, !remoteAddr.IsValid(), "agent address expected to be nil") + + _, err := client.CoreV1().Services(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + + _, err = client.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + + sts, err := client.AppsV1().StatefulSets(namespace).Get(context.Background(), objectName, v1.GetOptions{}) + if assert.NoError(t, err) { + assert.Equal( + t, + appsv1.StatefulSetSpec{ + Selector: &v1.LabelSelector{MatchLabels: selector}, + Replicas: ptr.To(int32(1)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + WireguardRevisionAnnotationName: "1-2-3-4", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: ContainerName, + Image: agentImage, + ImagePullPolicy: corev1.PullAlways, + // Retain ports in case they're named at the service level + Ports: nil, + Env: []corev1.EnvVar{ + { + Name: "LOCAL_PORTS_EXCLUDE_PROXY", + Value: "", + }, + { + Name: "ISTIO_INTERCEPTION_MODE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations['sidecar.istio.io/interceptionMode']", + }, + }, + }, + }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}, + RunAsUser: ptr.To(int64(0)), + RunAsGroup: ptr.To(int64(0)), + RunAsNonRoot: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(false), + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: WireguardConfigVolumeName, + ReadOnly: true, + 
MountPath: "/app/config", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: WireguardConfigVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: relatedObjectName, + Optional: ptr.To(false), + }, + }, + }, + }, + }, + }, + }, + sts.Spec, + ) + } + + secret, err := client.CoreV1().Secrets(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + if assert.NoError(t, err) { + err = yaml.Unmarshal(secret.Data["wg.yml"], &config.Wireguard{}) + assert.NoError(t, err) + } + }) + }) + + t.Run("statefulset stop", func(t *testing.T) { + cfg := config.NewConfig() + + testAgent(t, statefulset, cfg, func(t *testing.T, stop runnable.StopFunc, client kubernetes.Interface, _ netip.AddrPort) { + cfg.KeepResources = true + + stop() + + _, err := client.CoreV1().Services(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.NoError(t, err) + + _, err = client.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.NoError(t, err) + + _, err = client.CoreV1().Secrets(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.NoError(t, err) + + cfg.KeepResources = false + + stop() + + _, err = client.CoreV1().Services(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + + _, err = client.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + + _, err = client.CoreV1().Secrets(namespace).Get(context.Background(), relatedObjectName, v1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + }) + }) +} + +func Test_containerIndexOrDefault(t *testing.T) { + tests := []struct { + name string + containerName string + annotations map[string]string + containers []corev1.Container + want int + }{ + { + "no containers", + "first", + map[string]string{}, + 
[]corev1.Container{}, + -1, + }, + { + "no containers, no name", + "", + map[string]string{}, + []corev1.Container{}, + -1, + }, + { + "one container", + "first", + map[string]string{}, + []corev1.Container{ + {Name: "first"}, + }, + 0, + }, + { + "empty annotation", + "", + map[string]string{DefaultContainerAnnotationName: ""}, + []corev1.Container{ + {Name: "first"}, + }, + 0, + }, + { + "annotation with missing pod", + "", + map[string]string{DefaultContainerAnnotationName: "second"}, + []corev1.Container{ + {Name: "first"}, + }, + -1, + }, + { + "annotation", + "", + map[string]string{DefaultContainerAnnotationName: "second"}, + []corev1.Container{ + {Name: "first"}, + {Name: "second"}, + }, + 1, + }, + { + "missing", + "third", + map[string]string{}, + []corev1.Container{ + {Name: "first"}, + {Name: "second"}, + }, + -1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := containerIndexOrDefault(tt.containerName, tt.annotations, tt.containers); got != tt.want { + t.Errorf("containerIndexOrDefault() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100644 index 0000000..67292c3 --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,142 @@ +package config + +import ( + "net/netip" + + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/steved/kubewire/pkg/kuberneteshelpers" +) + +type Config struct { + // AgentImage is the container image reference to use within Kubernetes + AgentImage string + + // TargetObject is the target Kubernetes object to proxy access for + TargetObject runtime.Object + // Namespace is the namespace of the target Kubernetes object + Namespace string + // Container is the name of the container to target within the Kubernetes object + Container string + + // KeepResources will prevent load balancers and other created resources from being deleted when exiting + KeepResources bool + + // Wireguard 
contains wireguard-specific configuration + Wireguard Wireguard + + // KubernetesClusterDetails contains resolved information about the K8s cluster + KubernetesClusterDetails kuberneteshelpers.ClusterDetails +} + +func NewConfig() *Config { + return &Config{} +} + +// Wireguard represents configuration needed to set up wireguard in two different contexts: Local and Kubernetes Agent +type Wireguard struct { + // DirectAccess controls whether to attempt NAT hole-punching or use load balancers to access the target pod + DirectAccess bool + + // LocalKey always represents the keypair associated with the machine we're connecting from + LocalKey Key + // AgentKey represents the keypair associated with the Kubernetes agent we're connecting to + AgentKey Key + + // LocalAddress represents the local endpoint address for wireguard + LocalAddress netip.AddrPort + + // OverlayPrefix is the prefix of the overlay network + OverlayPrefix netip.Prefix + + // LocalOverlayAddress is the proxy address inside the overlay network + LocalOverlayAddress netip.Addr + + // AgentOverlayAddress is the agent address inside the overlay network + AgentOverlayAddress netip.Addr + + // AllowedIPs is the set of prefixes allowed to be routed through wireguard + AllowedIPs []netip.Prefix +} + +type WireguardOption func(*Wireguard) error + +func NewWireguardConfig(options ...WireguardOption) (Wireguard, error) { + wg := Wireguard{} + + for _, option := range options { + if err := option(&wg); err != nil { + return wg, err + } + } + + return wg, nil +} + +func WithGeneratedKeypairs() WireguardOption { + return func(wg *Wireguard) error { + localKeypair, err := wgtypes.GeneratePrivateKey() + if err != nil { + return err + } + + wg.LocalKey = Key{localKeypair} + + agentKeypair, err := wgtypes.GeneratePrivateKey() + if err != nil { + return err + } + + wg.AgentKey = Key{agentKeypair} + + return nil + } +} + +func WithLocalAddress(address netip.AddrPort) WireguardOption { + return func(wg *Wireguard) 
error { + wg.LocalAddress = address + return nil + } +} + +func WithDirectAccess(directAccess bool) WireguardOption { + return func(wg *Wireguard) error { + wg.DirectAccess = directAccess + return nil + } +} + +func WithOverlay(overlay, localAddress, agentAddress string) WireguardOption { + return func(wg *Wireguard) (err error) { + wg.OverlayPrefix, err = netip.ParsePrefix(overlay) + if err != nil { + return + } + + wg.LocalOverlayAddress, err = netip.ParseAddr(localAddress) + if err != nil { + return + } + + wg.AgentOverlayAddress, err = netip.ParseAddr(agentAddress) + + return + } +} + +func WithAllowedIPs(allowedIPs ...string) WireguardOption { + return func(wg *Wireguard) error { + for _, allowedIP := range allowedIPs { + prefix, err := netip.ParsePrefix(allowedIP) + if err != nil { + return err + } + + wg.AllowedIPs = append(wg.AllowedIPs, prefix) + } + + return nil + } +} diff --git a/pkg/config/key.go b/pkg/config/key.go new file mode 100644 index 0000000..ce49d4b --- /dev/null +++ b/pkg/config/key.go @@ -0,0 +1,25 @@ +package config + +import ( + "encoding" + + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" +) + +// Key wraps wgtypes.Key to allow readable marshal to YAML +type Key struct { + wgtypes.Key +} + +var _ encoding.TextMarshaler = Key{} + +// MarshalText implements the TextMarshaler interface +func (k Key) MarshalText() ([]byte, error) { + return []byte(k.String()), nil +} + +// UnmarshalText implements the TextMarshaler interface +func (k *Key) UnmarshalText(text []byte) (err error) { + k.Key, err = wgtypes.ParseKey(string(text)) + return +} diff --git a/pkg/config/version.go b/pkg/config/version.go new file mode 100644 index 0000000..208cde0 --- /dev/null +++ b/pkg/config/version.go @@ -0,0 +1,3 @@ +package config + +var Version = "latest" diff --git a/pkg/kuberneteshelpers/client.go b/pkg/kuberneteshelpers/client.go new file mode 100644 index 0000000..5a4e982 --- /dev/null +++ b/pkg/kuberneteshelpers/client.go @@ -0,0 +1,26 @@ +package 
kuberneteshelpers + +import ( + "fmt" + + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/utils/ptr" +) + +func ClientConfig(kubeconfig string) (kubernetes.Interface, *rest.Config, error) { + clientGetter := &genericclioptions.ConfigFlags{KubeConfig: ptr.To(kubeconfig)} + + restConfig, err := clientGetter.ToRESTConfig() + if err != nil { + return nil, nil, fmt.Errorf("unable to create kubernetes REST config: %w", err) + } + + client, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, nil, fmt.Errorf("unable to create kubernetes client: %w", err) + } + + return client, restConfig, nil +} diff --git a/pkg/kuberneteshelpers/cluster.go b/pkg/kuberneteshelpers/cluster.go new file mode 100644 index 0000000..d5b9b96 --- /dev/null +++ b/pkg/kuberneteshelpers/cluster.go @@ -0,0 +1,109 @@ +package kuberneteshelpers + +import ( + "context" + "fmt" + "net/netip" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +type ClusterDetails struct { + ServiceIP netip.Addr + PodCIDR, ServiceCIDR, NodeCIDR netip.Prefix +} + +func (c ClusterDetails) Resolve(ctx context.Context, client kubernetes.Interface, namespace string) (ClusterDetails, error) { + pods, err := client.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{}) + if err != nil { + return ClusterDetails{}, fmt.Errorf("unable to list pods for cluster details: %w", err) + } + + podCIDR := c.PodCIDR + if !podCIDR.IsValid() { + podAddr, err := func() (netip.Addr, error) { + // Find the first non-hostNetwork pod and assume that represents the pod CIDR as a /16 + for _, p := range pods.Items { + if p.Spec.HostNetwork || p.Status.PodIP == "" { + continue + } + + addr, err := netip.ParseAddr(p.Status.PodIP) + if err != nil { + continue + } + + return addr, nil + } + + return netip.Addr{}, fmt.Errorf("unable to find any pods") + }() + if err != nil { + return ClusterDetails{}, 
fmt.Errorf("unable to obtain pod CIDR: %w", err) + } + + podCIDR, err = podAddr.Prefix(16) + if err != nil { + return ClusterDetails{}, fmt.Errorf("unable to obtain pod CIDR: %w", err) + } + } + + dnsService, err := client.CoreV1().Services("kube-system").Get(ctx, "kube-dns", v1.GetOptions{}) + if err != nil { + return ClusterDetails{}, fmt.Errorf("unable to find kube-dns service for service CIDR: %w", err) + } + + serviceAddr, err := netip.ParseAddr(dnsService.Spec.ClusterIP) + if err != nil { + return ClusterDetails{}, fmt.Errorf("unable to obtain service CIDR: %w", err) + } + + serviceCIDR := c.ServiceCIDR + if !serviceCIDR.IsValid() { + serviceCIDR, err = serviceAddr.Prefix(16) + if err != nil { + return ClusterDetails{}, fmt.Errorf("unable to obtain service CIDR: %w", err) + } + } + + nodeCIDR := c.NodeCIDR + if !nodeCIDR.IsValid() { + nodes, err := client.CoreV1().Nodes().List(ctx, v1.ListOptions{}) + if err != nil { + return ClusterDetails{}, fmt.Errorf("unable to list nodes for cluster details: %w", err) + } + + if len(nodes.Items) < 1 { + return ClusterDetails{}, fmt.Errorf("no nodes found for cluster details: %w", err) + } + + nodeCIDR, err = func() (netip.Prefix, error) { + for _, node := range nodes.Items { + for _, address := range node.Status.Addresses { + if address.Type == corev1.NodeInternalIP { + nodeAddr, err := netip.ParseAddr(address.Address) + if err != nil { + continue + } + + return nodeAddr.Prefix(16) + } + } + } + + return netip.Prefix{}, fmt.Errorf("no valid InternalIP node addresses found") + }() + if err != nil { + return ClusterDetails{}, fmt.Errorf("unable to obtain node CIDR: %w", err) + } + } + + return ClusterDetails{ + ServiceIP: serviceAddr, + ServiceCIDR: serviceCIDR, + PodCIDR: podCIDR, + NodeCIDR: nodeCIDR, + }, nil +} diff --git a/pkg/kuberneteshelpers/cluster_test.go b/pkg/kuberneteshelpers/cluster_test.go new file mode 100644 index 0000000..d0d4c25 --- /dev/null +++ b/pkg/kuberneteshelpers/cluster_test.go @@ -0,0 +1,238 @@ 
+package kuberneteshelpers + +import ( + "context" + "net/netip" + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" +) + +func TestGetClusterDetails(t *testing.T) { + namespace := "test-ns" + + validPod := &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: "test-pod", Namespace: namespace}, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{PodIP: "100.64.0.1"}, + } + + kubeDNS := &corev1.Service{ + ObjectMeta: v1.ObjectMeta{Name: "kube-dns", Namespace: "kube-system"}, + Spec: corev1.ServiceSpec{ + ClusterIP: "172.0.0.1", + }, + } + + validNode := &corev1.Node{ + ObjectMeta: v1.ObjectMeta{Name: "node-1"}, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeExternalIP, + Address: "34.123.34.12", + }, + { + Type: corev1.NodeInternalIP, + Address: "10.0.0.1", + }, + }, + }, + } + + validClusterDetails := ClusterDetails{ + ServiceIP: netip.MustParseAddr("172.0.0.1"), + PodCIDR: netip.MustParsePrefix("100.64.0.0/16"), + ServiceCIDR: netip.MustParsePrefix("172.0.0.0/16"), + NodeCIDR: netip.MustParsePrefix("10.0.0.0/16"), + } + + tests := []struct { + name string + objects []runtime.Object + input ClusterDetails + want ClusterDetails + wantErr bool + }{ + { + "only host network pods", + []runtime.Object{ + &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: "test-pod-host-network", Namespace: namespace}, + Spec: corev1.PodSpec{HostNetwork: true}, + Status: corev1.PodStatus{}, + }, + kubeDNS, + validNode, + }, + ClusterDetails{}, + ClusterDetails{}, + true, + }, + { + "only pods with no IP", + []runtime.Object{ + &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: "test-pod-no-pod-ip", Namespace: namespace}, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{}, + }, + kubeDNS, + validNode, + }, + ClusterDetails{}, + ClusterDetails{}, + true, + }, + { + "only pods with invalid IP", + []runtime.Object{ + &corev1.Pod{ + ObjectMeta: 
v1.ObjectMeta{Name: "test-pod-invalid-ip", Namespace: namespace}, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{PodIP: "100."}, + }, + kubeDNS, + validNode, + }, + ClusterDetails{}, + ClusterDetails{}, + true, + }, + { + "pods with invalid IP", + []runtime.Object{ + &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: "test-pod-invalid-ip", Namespace: namespace}, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{PodIP: "100."}, + }, + validPod, + kubeDNS, + validNode, + }, + ClusterDetails{}, + validClusterDetails, + false, + }, + { + "no kube-dns", + []runtime.Object{validPod, validNode}, + ClusterDetails{}, + ClusterDetails{}, + true, + }, + { + "kube-dns with invalid ClusterIP", + []runtime.Object{ + &corev1.Service{ + ObjectMeta: v1.ObjectMeta{Name: "kube-dns", Namespace: "kube-system"}, + Spec: corev1.ServiceSpec{ + ClusterIP: "", + }, + }, + validPod, + validNode, + }, + ClusterDetails{}, + ClusterDetails{}, + true, + }, + { + "no nodes", + []runtime.Object{validPod, kubeDNS}, + ClusterDetails{}, + ClusterDetails{}, + true, + }, + { + "only nodes with external IP", + []runtime.Object{ + validPod, + kubeDNS, + &corev1.Node{ + ObjectMeta: v1.ObjectMeta{Name: "node-1"}, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{{Type: corev1.NodeExternalIP, Address: "34.123.34.12"}}, + }, + }, + }, + ClusterDetails{}, + ClusterDetails{}, + true, + }, + { + "only nodes with invalid IP", + []runtime.Object{ + validPod, + kubeDNS, + &corev1.Node{ + ObjectMeta: v1.ObjectMeta{Name: "node-1"}, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{{Type: corev1.NodeInternalIP, Address: "100."}}, + }, + }, + }, + ClusterDetails{}, + ClusterDetails{}, + true, + }, + { + "nodes with invalid IP", + []runtime.Object{ + validPod, + kubeDNS, + &corev1.Node{ + ObjectMeta: v1.ObjectMeta{Name: "node-0"}, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{{Type: corev1.NodeInternalIP, Address: "100."}}, + }, + }, + validNode, + }, + ClusterDetails{}, 
+ validClusterDetails, + false, + }, + { + "valid cluster", + []runtime.Object{validPod, kubeDNS, validNode}, + ClusterDetails{}, + validClusterDetails, + false, + }, + { + "prefilled CIDRs", + []runtime.Object{kubeDNS}, + ClusterDetails{ + PodCIDR: netip.MustParsePrefix("100.64.0.0/24"), + ServiceCIDR: netip.MustParsePrefix("172.0.0.0/12"), + NodeCIDR: netip.MustParsePrefix("10.0.0.0/12"), + }, + ClusterDetails{ + ServiceIP: netip.MustParseAddr("172.0.0.1"), + PodCIDR: netip.MustParsePrefix("100.64.0.0/24"), + ServiceCIDR: netip.MustParsePrefix("172.0.0.0/12"), + NodeCIDR: netip.MustParsePrefix("10.0.0.0/12"), + }, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.input.Resolve(context.Background(), fake.NewClientset(tt.objects...), namespace) + if (err != nil) != tt.wantErr { + t.Errorf("GetClusterDetails() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetClusterDetails() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/kuberneteshelpers/object.go b/pkg/kuberneteshelpers/object.go new file mode 100644 index 0000000..bf97c80 --- /dev/null +++ b/pkg/kuberneteshelpers/object.go @@ -0,0 +1,45 @@ +package kuberneteshelpers + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/utils/ptr" +) + +var runtimeScheme = runtime.NewScheme() + +func init() { + utilruntime.Must(appsv1.AddToScheme(runtimeScheme)) +} + +func ResolveObject(kubeconfig string, namespace string, targetObject []string) (runtime.Object, error) { + clientGetter := &genericclioptions.ConfigFlags{KubeConfig: ptr.To(kubeconfig)} + resourceResult := resource.NewBuilder(clientGetter). + WithScheme(runtimeScheme, runtimeScheme.PrioritizedVersionsAllGroups()...). 
+ NamespaceParam(namespace).DefaultNamespace(). + ResourceTypeOrNameArgs(true, targetObject...). + SingleResourceType(). + RequireObject(true). + Flatten(). + Do() + + if err := resourceResult.Err(); err != nil { + return nil, fmt.Errorf("no targets found: %w", err) + } + + if !resourceResult.TargetsSingleItems() { + return nil, fmt.Errorf("no targets found") + } + + object, err := resourceResult.Object() + if err != nil { + return nil, fmt.Errorf("unable to fetch target resource: %w", err) + } + + return object, nil +} diff --git a/pkg/kuberneteshelpers/podexec.go b/pkg/kuberneteshelpers/podexec.go new file mode 100644 index 0000000..5b5bb97 --- /dev/null +++ b/pkg/kuberneteshelpers/podexec.go @@ -0,0 +1,66 @@ +package kuberneteshelpers + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/client-go/kubernetes/scheme" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" +) + +func FileContents(ctx context.Context, config *rest.Config, pod *corev1.Pod, container, path string) (string, error) { + client, err := corev1client.NewForConfig(config) + if err != nil { + return "", err + } + + req := client.RESTClient().Post(). + Resource("pods"). + Name(pod.Name). + Namespace(pod.Namespace). 
+ SubResource("exec") + + req.VersionedParams(&corev1.PodExecOptions{ + Container: container, + Command: []string{"sh", "-c", fmt.Sprintf("cat %s", path)}, + Stdin: false, + Stdout: true, + Stderr: false, + TTY: false, + }, scheme.ParameterCodec) + + websocketExec, err := remotecommand.NewWebSocketExecutor(config, "GET", req.URL().String()) + if err != nil { + return "", err + } + + exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) + if err != nil { + return "", err + } + + exec, err = remotecommand.NewFallbackExecutor(websocketExec, exec, httpstream.IsUpgradeFailure) + if err != nil { + return "", err + } + + var out strings.Builder + + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdin: nil, + Stdout: &out, + Stderr: nil, + Tty: false, + TerminalSizeQueue: nil, + }) + if err != nil { + return "", err + } + + return out.String(), nil +} diff --git a/pkg/nat/discovery.go b/pkg/nat/discovery.go new file mode 100644 index 0000000..e9889df --- /dev/null +++ b/pkg/nat/discovery.go @@ -0,0 +1,68 @@ +package nat + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "github.com/pion/ice/v3" + "github.com/pion/stun/v2" +) + +const timeout = 30 * time.Second + +func FindLocalAddressAndPort(ctx context.Context) (string, int, error) { + log := logr.FromContextOrDiscard(ctx) + + agent, err := ice.NewAgent(&ice.AgentConfig{ + Urls: []*stun.URI{ + { + Scheme: stun.SchemeTypeSTUN, + Host: "stun.cloudflare.com", + Port: 3478, + }, + { + Scheme: stun.SchemeTypeSTUN, + Host: "stun.l.google.com", + Port: 19302, + }, + // TODO make configurable? 
+ }, + NetworkTypes: []ice.NetworkType{ice.NetworkTypeUDP4}, + CandidateTypes: []ice.CandidateType{ice.CandidateTypeServerReflexive}, + }) + if err != nil { + return "", 0, fmt.Errorf("unable to initialize NAT discovery client: %w", err) + } + + defer func() { + if err := agent.Close(); err != nil { + log.Error(err, "error closing NAT discovery client") + } + }() + + candidates := make(chan ice.Candidate, 1) + + err = agent.OnCandidate(func(candidate ice.Candidate) { + if candidate != nil { + log.V(1).Info("NAT discovery candidate", "candidate", candidate.String()) + candidates <- candidate + } + }) + + if err != nil { + return "", 0, fmt.Errorf("no proxy candidates found: %w", err) + } + + if err := agent.GatherCandidates(); err != nil { + return "", 0, fmt.Errorf("no proxy candidates found: %w", err) + } + + select { + case candidate := <-candidates: + return candidate.Address(), candidate.Port(), nil + case <-time.After(timeout): + return "", 0, fmt.Errorf("no proxy candidates found: timeout after %s", timeout.String()) + } +} diff --git a/pkg/proxy/config.go b/pkg/proxy/config.go new file mode 100644 index 0000000..9269af6 --- /dev/null +++ b/pkg/proxy/config.go @@ -0,0 +1,110 @@ +package proxy + +import ( + "context" + "fmt" + "net/netip" + + "github.com/go-logr/logr" + "k8s.io/client-go/kubernetes" + + "github.com/steved/kubewire/pkg/config" + "github.com/steved/kubewire/pkg/kuberneteshelpers" + "github.com/steved/kubewire/pkg/nat" +) + +var getClusterDetails = func(ctx context.Context, clusterDetails kuberneteshelpers.ClusterDetails, client kubernetes.Interface, namespace string) (kuberneteshelpers.ClusterDetails, error) { + return clusterDetails.Resolve(ctx, client, namespace) +} + +var findLocalAddressAndPort = func(ctx context.Context) (string, int, error) { + return nat.FindLocalAddressAndPort(ctx) +} + +var overlayCIDRs = []netip.Prefix{ + netip.MustParsePrefix("10.1.0.0/28"), + netip.MustParsePrefix("100.64.51.0/28"), +} + +func 
ResolveWireguardConfig(ctx context.Context, proxyConfig *config.Config, client kubernetes.Interface, overlayPrefix string, directAccess bool) error { + log := logr.FromContextOrDiscard(ctx) + + clusterDetails, err := getClusterDetails(ctx, proxyConfig.KubernetesClusterDetails, client, proxyConfig.Namespace) + if err != nil { + return fmt.Errorf("unable to obtain Kubernetes cluster details: %w", err) + } + + log.V(1).Info( + "Resolved Kubernetes cluster details", + "service_ip", + clusterDetails.ServiceIP, + "service_cidr", + clusterDetails.ServiceCIDR, + "pod_cidr", + clusterDetails.PodCIDR, + "node_cidr", + clusterDetails.NodeCIDR, + ) + + proxyConfig.KubernetesClusterDetails = clusterDetails + + var overlay netip.Prefix + if overlayPrefix != "" { + overlay, err = netip.ParsePrefix(overlayPrefix) + if err != nil { + return fmt.Errorf("unable to parse overlay prefix %q: %w", overlayPrefix, err) + } + } else { + overlay, err = func() (netip.Prefix, error) { + for _, cidr := range overlayCIDRs { + if cidr.Overlaps(clusterDetails.PodCIDR) || cidr.Overlaps(clusterDetails.ServiceCIDR) || cidr.Overlaps(clusterDetails.NodeCIDR) { + continue + } + + return cidr, nil + } + + return netip.Prefix{}, fmt.Errorf("unable to determine non-overlapping CIDR range for overlay network") + }() + if err != nil { + return err + } + } + + log.V(1).Info("Determined overlay prefix", "overlay", overlay.String()) + + proxyAddr := overlay.Addr() + localOverlayAddress := proxyAddr.Next() + agentOverlayAddress := localOverlayAddress.Next() + + options := []config.WireguardOption{ + config.WithGeneratedKeypairs(), + config.WithOverlay(overlay.String(), localOverlayAddress.String(), agentOverlayAddress.String()), + config.WithAllowedIPs(clusterDetails.PodCIDR.String(), clusterDetails.ServiceCIDR.String(), clusterDetails.NodeCIDR.String(), overlay.String()), + } + + if directAccess { + log.V(1).Info("Starting NAT address lookup") + + localIP, localPort, err := findLocalAddressAndPort(ctx) + if err 
!= nil { + return fmt.Errorf("unable to proxy connectable address for NAT traversal: %w", err) + } + + localAddr, err := netip.ParseAddr(localIP) + if err != nil { + return fmt.Errorf("unable to parse local IP: %w", err) + } + + localAddress := netip.AddrPortFrom(localAddr, uint16(localPort)) + options = append(options, config.WithLocalAddress(localAddress), config.WithDirectAccess(directAccess)) + + log.Info("NAT address lookup complete", "address", localAddress) + } else if proxyConfig.Wireguard.LocalAddress.IsValid() { + options = append(options, config.WithLocalAddress(proxyConfig.Wireguard.LocalAddress)) + } + + proxyConfig.Wireguard, err = config.NewWireguardConfig(options...) + + return err +} diff --git a/pkg/proxy/config_test.go b/pkg/proxy/config_test.go new file mode 100644 index 0000000..bb62539 --- /dev/null +++ b/pkg/proxy/config_test.go @@ -0,0 +1,150 @@ +package proxy + +import ( + "context" + "net/netip" + "reflect" + "testing" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + + "github.com/steved/kubewire/pkg/config" + "github.com/steved/kubewire/pkg/kuberneteshelpers" +) + +func TestResolveWireguardConfig(t *testing.T) { + validClusterDetails := kuberneteshelpers.ClusterDetails{ + ServiceIP: netip.MustParseAddr("172.0.0.1"), + PodCIDR: netip.MustParsePrefix("100.64.0.0/16"), + ServiceCIDR: netip.MustParsePrefix("172.0.0.0/16"), + NodeCIDR: netip.MustParsePrefix("10.0.0.0/16"), + } + + overlappingClusterDetails := kuberneteshelpers.ClusterDetails{ + ServiceIP: netip.MustParseAddr("172.0.0.1"), + PodCIDR: netip.MustParsePrefix("100.64.51.0/16"), + ServiceCIDR: netip.MustParsePrefix("172.0.0.0/16"), + NodeCIDR: netip.MustParsePrefix("10.1.0.0/16"), + } + + findLocalAddressAndPort = func(_ context.Context) (string, int, error) { + return "1.2.3.4", 9080, nil + } + + defaultOverlay := netip.MustParsePrefix("10.1.0.0/28") + + tests := []struct { + name string + clusterDetails kuberneteshelpers.ClusterDetails + overlayPrefix 
string + directAccess bool + want config.Wireguard + wantErr bool + }{ + { + "invalid overlay prefix", + validClusterDetails, + "1.2./1", + false, + config.Wireguard{}, + true, + }, + { + "overlapping CIDR prefixes", + overlappingClusterDetails, + "", + false, + config.Wireguard{}, + true, + }, + { + "valid", + validClusterDetails, + "", + false, + config.Wireguard{ + DirectAccess: false, + LocalAddress: netip.AddrPort{}, + OverlayPrefix: defaultOverlay, + LocalOverlayAddress: netip.MustParseAddr("10.1.0.1"), + AgentOverlayAddress: netip.MustParseAddr("10.1.0.2"), + AllowedIPs: []netip.Prefix{ + validClusterDetails.PodCIDR, + validClusterDetails.ServiceCIDR, + validClusterDetails.NodeCIDR, + defaultOverlay, + }, + }, + false, + }, + { + "overlay prefix", + validClusterDetails, + "192.168.0.0/16", + false, + config.Wireguard{ + DirectAccess: false, + LocalAddress: netip.AddrPort{}, + OverlayPrefix: netip.MustParsePrefix("192.168.0.0/16"), + LocalOverlayAddress: netip.MustParseAddr("192.168.0.1"), + AgentOverlayAddress: netip.MustParseAddr("192.168.0.2"), + AllowedIPs: []netip.Prefix{ + validClusterDetails.PodCIDR, + validClusterDetails.ServiceCIDR, + validClusterDetails.NodeCIDR, + netip.MustParsePrefix("192.168.0.0/16"), + }, + }, + false, + }, + { + "direct access", + validClusterDetails, + "", + true, + config.Wireguard{ + DirectAccess: true, + LocalAddress: netip.MustParseAddrPort("1.2.3.4:9080"), + OverlayPrefix: defaultOverlay, + LocalOverlayAddress: netip.MustParseAddr("10.1.0.1"), + AgentOverlayAddress: netip.MustParseAddr("10.1.0.2"), + AllowedIPs: []netip.Prefix{ + validClusterDetails.PodCIDR, + validClusterDetails.ServiceCIDR, + validClusterDetails.NodeCIDR, + defaultOverlay, + }, + }, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + getClusterDetails = func(_ context.Context, _ kuberneteshelpers.ClusterDetails, _ kubernetes.Interface, _ string) (kuberneteshelpers.ClusterDetails, error) { + return tt.clusterDetails, 
nil + } + + cfg := &config.Config{} + if err := ResolveWireguardConfig(context.Background(), cfg, fake.NewClientset(), tt.overlayPrefix, tt.directAccess); (err != nil) != tt.wantErr { + t.Errorf("ResolveWireguardConfig() error = %v, expected %v", err, tt.wantErr) + return + } + + if cfg.KubernetesClusterDetails != tt.clusterDetails { + t.Errorf("ResolveWireguardConfig() cluster details got = %v, want %v", cfg.KubernetesClusterDetails, tt.clusterDetails) + } + + if cfg.Wireguard.AgentKey.String() == "" || cfg.Wireguard.LocalKey.String() == "" { + t.Errorf("ResolveWireguardConfig() invalid cluster keys got = (%s, %s)", cfg.Wireguard.AgentKey.String(), cfg.Wireguard.LocalKey.String()) + } + + tt.want.AgentKey = cfg.Wireguard.AgentKey + tt.want.LocalKey = cfg.Wireguard.LocalKey + + if !reflect.DeepEqual(cfg.Wireguard, tt.want) { + t.Errorf("ResolveWireguardConfig() got = %v, want %v", cfg.Wireguard, tt.want) + } + }) + } +} diff --git a/pkg/proxy/proxy.go b/pkg/proxy/proxy.go new file mode 100644 index 0000000..ee14b97 --- /dev/null +++ b/pkg/proxy/proxy.go @@ -0,0 +1,130 @@ +package proxy + +import ( + "context" + "net/netip" + "os" + "os/signal" + "syscall" + + "github.com/go-logr/logr" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/steved/kubewire/pkg/agent" + "github.com/steved/kubewire/pkg/config" + "github.com/steved/kubewire/pkg/routing" + "github.com/steved/kubewire/pkg/runnable" + "github.com/steved/kubewire/pkg/wg" +) + +var stopFuncs []runnable.StopFunc + +func Run(ctx context.Context, cfg *config.Config, kubernetesClient kubernetes.Interface, kubernetesRestConfig *rest.Config) error { + log := logr.FromContextOrDiscard(ctx) + + defer func() { + for _, stop := range stopFuncs { + stop() + } + }() + + if cfg.Wireguard.LocalAddress.IsValid() { + if err := wireguardDeviceSetup(ctx, cfg, netip.AddrPort{}); err != nil { + return err + } + + if _, err := kubernetesSetup(ctx, cfg, kubernetesClient, kubernetesRestConfig); err != nil { + 
return err + } + } else { + agentAddress, err := kubernetesSetup(ctx, cfg, kubernetesClient, kubernetesRestConfig) + if err != nil { + return err + } + + if err := wireguardDeviceSetup(ctx, cfg, agentAddress); err != nil { + return err + } + } + + log.Info("Started. Use Ctrl-C to exit...") + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + <-sigCh + + return nil +} + +func wireguardDeviceSetup(ctx context.Context, cfg *config.Config, agentAddress netip.AddrPort) error { + log := logr.FromContextOrDiscard(ctx) + + log.V(1).Info("Starting Wireguard device setup") + + listenPort := 0 + if cfg.Wireguard.LocalAddress.IsValid() { + listenPort = int(cfg.Wireguard.LocalAddress.Port()) + } + + wireguardDevice := wg.NewWireguardDevice(wg.WireguardDeviceConfig{ + Peer: wg.WireguardDevicePeer{ + Endpoint: agentAddress, + PublicKey: cfg.Wireguard.AgentKey.PublicKey(), + AllowedIPs: cfg.Wireguard.AllowedIPs, + }, + PrivateKey: cfg.Wireguard.LocalKey.Key, + ListenPort: listenPort, + Address: cfg.Wireguard.LocalOverlayAddress, + }) + + wgStop, err := wireguardDevice.Start(ctx) + if err != nil { + return err + } + + stopFuncs = append(stopFuncs, wgStop) + + log.Info("Wireguard device setup complete") + + log.V(1).Info("Starting route setup") + + router := routing.NewRouting( + wireguardDevice.DeviceName(), + cfg.KubernetesClusterDetails.ServiceIP, + cfg.KubernetesClusterDetails.PodCIDR, + cfg.KubernetesClusterDetails.ServiceCIDR, + cfg.KubernetesClusterDetails.NodeCIDR, + netip.PrefixFrom(cfg.Wireguard.AgentOverlayAddress, 32), + ) + + routerStop, err := router.Start(ctx) + if err != nil { + return err + } + + stopFuncs = append(stopFuncs, routerStop) + + log.Info("Routing setup complete") + + return nil +} + +func kubernetesSetup(ctx context.Context, cfg *config.Config, kubernetesClient kubernetes.Interface, kubernetesRestConfig *rest.Config) (netip.AddrPort, error) { + log := logr.FromContextOrDiscard(ctx) + + log.V(1).Info("Starting 
Kubernetes setup") + + kubernetesAgent := agent.NewKubernetesAgent(cfg, kubernetesClient, kubernetesRestConfig) + + agentStop, err := kubernetesAgent.Start(ctx) + if err != nil { + return netip.AddrPort{}, err + } + + stopFuncs = append(stopFuncs, agentStop) + + log.Info("Kubernetes setup complete") + + return kubernetesAgent.AgentAddress(), nil +} diff --git a/pkg/routing/routing.go b/pkg/routing/routing.go new file mode 100644 index 0000000..5c37657 --- /dev/null +++ b/pkg/routing/routing.go @@ -0,0 +1,17 @@ +package routing + +import ( + "net/netip" + + "github.com/steved/kubewire/pkg/runnable" +) + +type routing struct { + deviceName string + routes []netip.Prefix + dnsServer netip.Addr +} + +func NewRouting(deviceName string, dnsServer netip.Addr, routes ...netip.Prefix) runnable.Runnable { + return &routing{deviceName: deviceName, dnsServer: dnsServer, routes: routes} +} diff --git a/pkg/routing/routing_darwin.go b/pkg/routing/routing_darwin.go new file mode 100644 index 0000000..3d41150 --- /dev/null +++ b/pkg/routing/routing_darwin.go @@ -0,0 +1,69 @@ +//go:build darwin + +package routing + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + + "github.com/go-logr/logr" + + "github.com/steved/kubewire/pkg/runnable" +) + +func (r *routing) Start(ctx context.Context) (runnable.StopFunc, error) { + for _, route := range r.routes { + rt := exec.Command("route", "add", "-net", route.String(), "-interface", r.deviceName) + if _, err := rt.CombinedOutput(); err != nil { + return nil, fmt.Errorf("unable to add route for %s: %w", route.String(), err) + } + } + + if r.dnsServer.IsValid() { + if err := os.MkdirAll("/etc/resolver", 0o755); err != nil { + return nil, fmt.Errorf("unable to create /etc/resolver: %w", err) + } + + contents := []byte(fmt.Sprintf("domain cluster.local\nnameserver %s\nsearch svc.cluster.local cluster.local local", r.dnsServer.String())) + + err := os.WriteFile("/etc/resolver/cluster.local", contents, 0o644) + if err != nil { + return 
nil, fmt.Errorf("unable to create /etc/resolver/cluster.local: %w", err) + } + + _, err = exec.Command("defaults", "write", "/Library/Preferences/com.apple.mDNSResponder.plist", "AlwaysAppendSearchDomains", "-bool", "yes").CombinedOutput() + if err != nil { + return nil, fmt.Errorf("unable to enable mDNSResponder AlwaysAppendSearchDomains: %w", err) + } + + _, err = exec.Command("killall", "mDNSResponder").CombinedOutput() + if err != nil { + return nil, fmt.Errorf("unable to restart mDNSResponder: %w", err) + } + } + + return func() { + log := logr.FromContextOrDiscard(ctx) + + var errs []error + + if _, err := exec.Command("defaults", "write", "/Library/Preferences/com.apple.mDNSResponder.plist", "AlwaysAppendSearchDomains", "-bool", "no").CombinedOutput(); err != nil { + errs = append(errs, fmt.Errorf("unable to disable mDNSResponder AlwaysAppendSearchDomains: %w", err)) + } + + if _, err := exec.Command("killall", "mDNSResponder").CombinedOutput(); err != nil { + errs = append(errs, fmt.Errorf("unable to restart mDNSResponder: %w", err)) + } + + if err := os.Remove("/etc/resolver/cluster.local"); err != nil && !os.IsNotExist(err) { + errs = append(errs, fmt.Errorf("unable to remove /etc/resolver/cluster.local: %w", err)) + } + + if len(errs) > 0 { + log.Error(errors.Join(errs...), "unable to clean up routing") + } + }, nil +} diff --git a/pkg/routing/routing_linux.go b/pkg/routing/routing_linux.go new file mode 100644 index 0000000..4a8a99a --- /dev/null +++ b/pkg/routing/routing_linux.go @@ -0,0 +1,118 @@ +//go:build linux + +package routing + +import ( + "context" + "fmt" + "net" + "syscall" + + "github.com/go-logr/logr" + "github.com/godbus/dbus/v5" + "github.com/jsimonetti/rtnetlink" + "golang.org/x/sys/unix" + + "github.com/steved/kubewire/pkg/runnable" +) + +type resolvedLinkDNS struct { + Family int + IP [4]byte +} + +type resolvedLinkDomain struct { + Name string + RoutingOnly bool +} + +func (r *routing) Start(ctx context.Context) (runnable.StopFunc, 
error) { + log := logr.FromContextOrDiscard(ctx) + + netlink, err := rtnetlink.Dial(nil) + if err != nil { + return nil, fmt.Errorf("unable to initialize netlink client: %w", err) + } + + defer func() { + if err := netlink.Close(); err != nil { + log.Error(err, "unable to close netlink client") + } + }() + + iface, err := net.InterfaceByName(r.deviceName) + if err != nil { + return nil, fmt.Errorf("unable to find wireguard interface %q: %w", r.deviceName, err) + } + + for _, route := range r.routes { + ip := net.IP(route.Addr().AsSlice()) + + err = netlink.Route.Add(&rtnetlink.RouteMessage{ + Family: unix.AF_INET, + Table: unix.RT_TABLE_MAIN, + Protocol: unix.RTPROT_STATIC, + Scope: unix.RT_SCOPE_LINK, + Type: unix.RTN_UNICAST, + DstLength: uint8(route.Bits()), + Attributes: rtnetlink.RouteAttributes{ + Dst: ip, + OutIface: uint32(iface.Index), + Gateway: net.ParseIP("0.0.0.0"), + }, + }) + if err != nil { + return nil, fmt.Errorf("unable to add route for %s: %w", route.String(), err) + } + } + + if r.dnsServer.IsValid() { + dbusClient, err := dbus.ConnectSystemBus() + if err != nil { + return nil, fmt.Errorf("unable to create dbus client: %w", err) + } + + defer func() { + if err := dbusClient.Close(); err != nil { + log.Error(err, "unable to close dbus client") + } + }() + + resolved := dbusClient.Object("org.freedesktop.resolve1", "/org/freedesktop/resolve1") + + err = resolved.CallWithContext( + ctx, + "org.freedesktop.resolve1.Manager.SetLinkDNS", + 0, + iface.Index, + []resolvedLinkDNS{{Family: syscall.AF_INET, IP: r.dnsServer.As4()}}, + ).Err + if err != nil { + return nil, fmt.Errorf("unable to set DNS for %q: %w", r.deviceName, err) + } + + err = resolved.CallWithContext( + ctx, + "org.freedesktop.resolve1.Manager.SetLinkDomains", + 0, + iface.Index, + []resolvedLinkDomain{{Name: "svc.cluster.local", RoutingOnly: false}}, + ).Err + if err != nil { + return nil, fmt.Errorf("unable to set DNS for %q: %w", r.deviceName, err) + } + + err = 
const (
	// PersistentKeepaliveInterval is the wireguard persistent-keepalive
	// period applied to peers with a known endpoint, keeping NAT/firewall
	// mappings open between handshakes.
	PersistentKeepaliveInterval = 25 * time.Second

	// DefaultWireguardPort is the UDP listen port used when the
	// configuration does not specify one (ListenPort == 0).
	DefaultWireguardPort = 19070

	// defaultDeviceName is the initial interface name; on darwin the
	// kernel-assigned utunN name replaces it once the device is started.
	defaultDeviceName = "wg0"
)

// WireguardDevicePeer describes the single remote peer of the tunnel.
type WireguardDevicePeer struct {
	// Endpoint is the peer's UDP address. It may be left unset, in which
	// case endpoint configuration is skipped and the peer is expected to
	// dial in.
	Endpoint netip.AddrPort

	// PublicKey is the peer's wireguard public key.
	PublicKey wgtypes.Key

	// AllowedIPs are the prefixes routed to (and accepted from) the peer.
	AllowedIPs []netip.Prefix
}

// WireguardDeviceConfig holds everything needed to create and configure the
// local wireguard interface.
type WireguardDeviceConfig struct {
	Peer       WireguardDevicePeer
	PrivateKey wgtypes.Key

	// ListenPort selects the UDP listen port: 0 means DefaultWireguardPort,
	// a negative value requests a kernel-chosen ephemeral port, any other
	// value is used as-is.
	ListenPort int

	// Address is the overlay IP assigned to the local interface.
	Address netip.Addr
}

// WireguardDevice is a runnable wireguard tunnel interface.
type WireguardDevice interface {
	runnable.Runnable

	// DeviceName returns the name of the underlying network interface.
	DeviceName() string
}

// wireguardDevice carries the platform-independent state shared by the
// darwin and linux Start implementations.
type wireguardDevice struct {
	config     WireguardDeviceConfig
	deviceName string
}

// NewWireguardDevice returns a WireguardDevice for cfg using the default
// interface name; the effective name may change once Start runs.
func NewWireguardDevice(cfg WireguardDeviceConfig) WireguardDevice {
	return &wireguardDevice{config: cfg, deviceName: defaultDeviceName}
}

// DeviceName returns the current interface name.
func (w *wireguardDevice) DeviceName() string {
	return w.deviceName
}
"github.com/tailscale/wireguard-go/device" + "github.com/tailscale/wireguard-go/ipc" + "github.com/tailscale/wireguard-go/tun" + + "github.com/steved/kubewire/pkg/runnable" +) + +func (w *wireguardDevice) Start(ctx context.Context) (runnable.StopFunc, error) { + log := logr.FromContextOrDiscard(ctx) + + tunDev, err := tun.CreateTUN("utun", device.DefaultMTU) + if err != nil { + return nil, fmt.Errorf("unable to create utun device: %w", err) + } + + w.deviceName, err = tunDev.Name() + if err != nil { + return nil, fmt.Errorf("unable to obtain utun device name: %w", err) + } + + ipcDev, err := ipc.UAPIOpen(w.deviceName) + if err != nil { + return nil, fmt.Errorf("unable to create proxy socket: %w", err) + } + + deviceLogger := &device.Logger{ + Verbosef: func(format string, args ...any) { + log.V(1).Info(fmt.Sprintf(format, args...)) + }, + Errorf: func(format string, args ...any) { + log.Error(nil, fmt.Sprintf(format, args...)) + }, + } + + dev := device.NewDevice(tunDev, conn.NewDefaultBind(), deviceLogger) + + ipcListener, err := ipc.UAPIListen(w.deviceName, ipcDev) + if err != nil { + return nil, fmt.Errorf("unable to create proxy socket listener: %w", err) + } + + go func() { + for { + newConn, err := ipcListener.Accept() + + log.V(1).Info("accepting new connection") + + if err != nil { + log.V(1).Info("unable to accept new connection", "error", err.Error()) + } else { + go dev.IpcHandle(newConn) + } + } + }() + + var replacePeerConfig strings.Builder + + replacePeerConfig.WriteString("replace_peers=true\n") + replacePeerConfig.WriteString(fmt.Sprintf("public_key=%s\n", hex.EncodeToString(w.config.Peer.PublicKey[:]))) + + if w.config.Peer.Endpoint.IsValid() { + replacePeerConfig.WriteString(fmt.Sprintf("endpoint=%s\n", w.config.Peer.Endpoint.String())) + replacePeerConfig.WriteString(fmt.Sprintf("persistent_keepalive_interval=%d\n", int(PersistentKeepaliveInterval.Seconds()))) + } + + for _, ip := range w.config.Peer.AllowedIPs { + 
replacePeerConfig.WriteString(fmt.Sprintf("allowed_ip=%s\n", ip.String())) + } + + err = dev.IpcSet(replacePeerConfig.String()) + if err != nil { + return nil, fmt.Errorf("unable to configure wireguard device with new peer: %w", err) + } + + err = dev.IpcSet(fmt.Sprintf("private_key=%s", hex.EncodeToString(w.config.PrivateKey[:]))) + if err != nil { + return nil, fmt.Errorf("unable to setup %s: %w", w.deviceName, err) + } + + listenPort := DefaultWireguardPort + if w.config.ListenPort < 0 { + listenPort = 0 + } else if w.config.ListenPort != 0 { + listenPort = w.config.ListenPort + } + + err = dev.IpcSet(fmt.Sprintf("listen_port=%d", listenPort)) + if err != nil { + return nil, fmt.Errorf("unable to setup %s: %w", w.deviceName, err) + } + + output, err := exec.Command("ifconfig", w.deviceName, "inet", w.config.Address.String(), w.config.Address.String()).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("unable to setup %s with ifconfig (%w): %s", w.deviceName, err, string(output)) + } + + output, err = exec.Command("ifconfig", w.deviceName, "up").CombinedOutput() + if err != nil { + return nil, fmt.Errorf("unable to setup %s with ifconfig (%w): %s", w.deviceName, err, string(output)) + } + + return func() { + err := errors.Join( + ipcListener.Close(), + ipcDev.Close(), + tunDev.Close(), + ) + + if err != nil { + log.Error(err, "unable to cleanly terminate wireguard device") + } + }, nil +} diff --git a/pkg/wg/device_linux.go b/pkg/wg/device_linux.go new file mode 100644 index 0000000..408cda2 --- /dev/null +++ b/pkg/wg/device_linux.go @@ -0,0 +1,125 @@ +//go:build linux + +package wg + +import ( + "context" + "fmt" + "net" + "syscall" + + "github.com/go-logr/logr" + "github.com/jsimonetti/rtnetlink" + "golang.org/x/sys/unix" + "golang.zx2c4.com/wireguard/wgctrl" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "k8s.io/utils/ptr" + + "github.com/steved/kubewire/pkg/runnable" +) + +func (w *wireguardDevice) Start(ctx context.Context) (runnable.StopFunc, 
error) { + log := logr.FromContextOrDiscard(ctx) + + conn, err := rtnetlink.Dial(nil) + if err != nil { + return nil, fmt.Errorf("unable to initialize netlink client: %w", err) + } + + err = conn.Link.New(&rtnetlink.LinkMessage{ + Family: syscall.AF_UNSPEC, + Flags: unix.IFF_UP, + Attributes: &rtnetlink.LinkAttributes{ + Name: w.deviceName, + Info: &rtnetlink.LinkInfo{Kind: "wireguard"}, + }, + }) + if err != nil { + return nil, fmt.Errorf("unable to create wireguard interface: %w", err) + } + + iface, err := net.InterfaceByName(w.deviceName) + if err != nil { + return nil, fmt.Errorf("unable to find created wireguard interface: %w", err) + } + + overlayIP := net.IP(w.config.Address.AsSlice()) + broadcast := net.IPv4(255, 255, 255, 255) + + err = conn.Address.New(&rtnetlink.AddressMessage{ + Family: syscall.AF_INET, + PrefixLength: uint8(32), + Scope: unix.RT_SCOPE_UNIVERSE, + Index: uint32(iface.Index), + Attributes: &rtnetlink.AddressAttributes{ + Address: overlayIP, + Local: overlayIP, + Broadcast: broadcast, + }, + }) + if err != nil { + return nil, fmt.Errorf("unable to add %s to %s: %w", overlayIP, w.deviceName, err) + } + + wgClient, err := wgctrl.New() + if err != nil { + return nil, fmt.Errorf("unable to create wireguard client: %w", err) + } + + defer func() { + if err := wgClient.Close(); err != nil { + log.Error(err, "unable to close wireguard client") + } + }() + + listenPort := DefaultWireguardPort + if w.config.ListenPort < 0 { + listenPort = 0 + } else if w.config.ListenPort != 0 { + listenPort = w.config.ListenPort + } + + if err := wgClient.ConfigureDevice(w.deviceName, wgtypes.Config{ + ListenPort: ptr.To(listenPort), + PrivateKey: ptr.To(w.config.PrivateKey), + }); err != nil { + return nil, fmt.Errorf("unable to configure wireguard: %w", err) + } + + allowedIPs := make([]net.IPNet, len(w.config.Peer.AllowedIPs)) + for i, ip := range w.config.Peer.AllowedIPs { + allowedIPs[i] = net.IPNet{ + IP: ip.Addr().AsSlice(), + Mask: 
net.CIDRMask(ip.Bits(), ip.Addr().BitLen()), + } + } + + var endpoint *net.UDPAddr + if w.config.Peer.Endpoint.IsValid() { + endpoint = net.UDPAddrFromAddrPort(w.config.Peer.Endpoint) + } + + peer := wgtypes.PeerConfig{ + PublicKey: w.config.Peer.PublicKey, + PersistentKeepaliveInterval: ptr.To(PersistentKeepaliveInterval), + AllowedIPs: allowedIPs, + Endpoint: endpoint, + } + + if err := wgClient.ConfigureDevice(w.deviceName, wgtypes.Config{ + ReplacePeers: true, + Peers: []wgtypes.PeerConfig{peer}, + }); err != nil { + return nil, fmt.Errorf("unable to configure wireguard with peer: %w", err) + } + + return func() { + if err := conn.Link.Delete(uint32(iface.Index)); err != nil { + log.Error(err, "unable to delete interface", "w.deviceName", iface.Name) + } + + if err := conn.Close(); err != nil { + log.Error(err, "unable to close netlink client") + } + }, nil +}