Skip to content

Commit

Permalink
Add generic addon deployment config to tilt
Browse files Browse the repository at this point in the history
Signed-off-by: killianmuldoon <[email protected]>
  • Loading branch information
killianmuldoon committed Aug 1, 2022
1 parent a24665d commit 811037e
Show file tree
Hide file tree
Showing 6 changed files with 288 additions and 24 deletions.
159 changes: 157 additions & 2 deletions Tiltfile
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,26 @@ COPY --from=tilt-helper /usr/bin/kubectl /usr/bin/kubectl
},
}

# Create a data structure to hold information about addons.
# Each entry is keyed by addon name and is read by load_addon_tiltfiles(),
# prepare_all() and deploy_addons() below.
addons = {
    "test-extension": {
        # Path to the addon's source tree, relative to this Tiltfile.
        "context": "./test/extension",
        # Image ref for the addon. NOTE(review): deploy_addons() currently
        # derives its own ref from the addon name instead of reading this key —
        # confirm which is intended.
        "image": "gcr.io/k8s-staging-cluster-api/sample-extension",
        # Name of the container in the addon's Deployment whose command/args
        # tilt-prepare rewrites (see --addons flag in tilt-prepare).
        "container_name": "extension",
        # Name of the binary built into <context>/.tiltbuild/bin and synced
        # into the running container for live reload.
        "binary_name": "extender",
        # Namespace the addon is deployed into. NOTE(review): not read
        # anywhere visible in this file — confirm it is consumed elsewhere.
        "namespace": "default",
        # Paths (relative to context) watched to trigger rebuild/live reload.
        "live_reload_deps": ["main.go", "handlers"],
        # Label used to group this addon's resources in the Tilt UI.
        "label": "RuntimeSDK",
        # Exported as environment variables for envsubst when tilt-prepare
        # renders the addon yaml (see prepare_all()).
        "kustomize_substitutions": {
            "SERVICE_NAMESPACE": "default",
        },
        # Extra yaml files (kind -> file, relative to context) applied
        # alongside the addon's kustomize output by deploy_addons().
        "additional_resources": {
            "ExtensionConfig": "extensionconfig.yaml",
            "ConfigMap": "hookresponses-configmap.yaml",
        },
    },
}

# ensure_clusterctl builds the clusterctl binary via make so that later
# setup steps can rely on it being present.
def ensure_clusterctl():
    local("make clusterctl")

Expand Down Expand Up @@ -153,6 +173,29 @@ def load_provider_tiltfiles():
provider_config["go_main"] = "main.go"
providers[provider_name] = provider_config

# load_addon_tiltfiles looks for tilt-addon.yaml files in the repositories listed in
# "addon_repos" in tilt settings and loads their config into the global `addons` dict.
# Each file may hold a single addon config or a list of them; each item must have
# "name" and "config" keys. Missing optional fields get the same defaults as providers.
def load_addon_tiltfiles():
    addon_repos = settings.get("addon_repos", [])
    for addon in addon_repos:
        # Prefer the yaml file; fall back to json if yaml is absent.
        file = addon + "/tilt-addon.yaml" if os.path.exists(addon + "/tilt-addon.yaml") else addon + "/tilt-addon.json"
        if not os.path.exists(file):
            # This loads addons, not providers — the message previously referred
            # to tilt-provider files (copy-paste from load_provider_tiltfiles).
            fail("Failed to load addon. No tilt-addon.{yaml|json} file found in " + addon)
        addon_details = read_yaml(file, default = {})
        # Normalize a single-addon file to a one-element list.
        if type(addon_details) != type([]):
            addon_details = [addon_details]
        for item in addon_details:
            addon_name = item["name"]
            addon_config = item["config"]
            # Resolve "context" relative to the addon repo root.
            if "context" in addon_config:
                addon_config["context"] = addon + "/" + addon_config["context"]
            else:
                addon_config["context"] = addon
            if "kustomize_config" not in addon_config:
                addon_config["kustomize_config"] = True
            if "go_main" not in addon_config:
                addon_config["go_main"] = "main.go"
            addons[addon_name] = addon_config

tilt_helper_dockerfile_header = """
# Tilt image
FROM golang:1.18.3 as tilt-helper
Expand All @@ -169,7 +212,7 @@ WORKDIR /
COPY --from=tilt-helper /start.sh .
COPY --from=tilt-helper /restart.sh .
COPY --from=tilt-helper /go/bin/dlv .
COPY manager .
COPY $binary_name .
"""

# Configures a provider by doing the following:
Expand Down Expand Up @@ -267,6 +310,7 @@ def enable_provider(name, debug):
ref = p.get("image"),
context = context + "/.tiltbuild/bin/",
dockerfile_contents = dockerfile_contents,
build_args = {"binary_name": "manager"},
target = "tilt",
only = "manager",
live_update = [
Expand All @@ -289,6 +333,96 @@ def enable_provider(name, debug):
resource_deps = ["provider_crd"],
)

# deploy_addons deploys each addon listed under "deploy_addons" in tilt settings:
# it builds the addon binary with a local_resource, wraps it in a tilt-helper image
# with live update, applies the yaml rendered by tilt-prepare, and applies any
# additional resources shipped with the addon.
def deploy_addons():
    for name in settings.get("deploy_addons", []):
        addon = addons.get(name)
        # Fail early with the same message prepare_all() uses for consistency.
        if addon == None:
            fail("Addon with name " + name + " not found")
        context = addon.get("context")
        label = addon.get("label")
        go_main = addon.get("go_main", "main.go")
        binary_name = addon.get("binary_name")

        # Prefix each live reload dependency with context. For example, if the context is
        # test/infra/docker and main.go is listed as a dep, the result is test/infra/docker/main.go.
        # This adjustment is needed so Tilt can watch the correct paths for changes.
        live_reload_deps = []
        for d in addon.get("live_reload_deps", []):
            live_reload_deps.append(context + "/" + d)

        # Set up a local_resource build of the addon binary. The addon is expected to have
        # its go_main (default main.go) at the top level of its context.
        cgo_enabled = "0"
        build_options = ""
        ldflags = "-extldflags \"-static\""
        gcflags = ""
        build_env = "CGO_ENABLED={cgo_enabled} GOOS=linux GOARCH={arch}".format(
            cgo_enabled = cgo_enabled,
            arch = os_arch,
        )
        # Write the binary under the addon's configured binary_name so the docker_build
        # below (which COPYs and syncs {binary_name}) can find it. Previously this was
        # hard-coded to "extender", which only worked for addons whose binary_name
        # happened to be "extender".
        build_cmd = "{build_env} go build {build_options} -gcflags '{gcflags}' -ldflags '{ldflags}' -o .tiltbuild/bin/{binary_name} {go_main}".format(
            build_env = build_env,
            build_options = build_options,
            gcflags = gcflags,
            go_main = go_main,
            ldflags = ldflags,
            binary_name = binary_name,
        )

        local_resource(
            name.lower() + "_binary",
            cmd = "cd {context};mkdir -p .tiltbuild/bin;{build_cmd}".format(
                context = context,
                build_cmd = build_cmd,
            ),
            labels = [label, "ALL.binaries"],
            deps = live_reload_deps,
        )

        additional_docker_helper_commands = addon.get("additional_docker_helper_commands", "")
        additional_docker_build_commands = addon.get("additional_docker_build_commands", "")
        dockerfile_contents = "\n".join([
            tilt_helper_dockerfile_header,
            additional_docker_helper_commands,
            tilt_dockerfile_header,
            additional_docker_build_commands,
        ])

        # Set up an image build for the addon. The live update configuration syncs the
        # output from the local_resource build into the container.
        # The build arg must be named "binary_name" to match the $binary_name reference
        # in tilt_dockerfile_header (the provider build passes {"binary_name": "manager"});
        # the previous "binary" key left $binary_name unset so the COPY had nothing to copy.
        # NOTE(review): addon.get("image") is declared in the addons config but unused
        # here — confirm this hard-coded ref matches the image in the addon's yaml.
        docker_build(
            ref = "gcr.io/k8s-staging-cluster-api/" + name,
            context = context + "/.tiltbuild/bin/",
            build_args = {"binary_name": binary_name},
            dockerfile_contents = dockerfile_contents,
            target = "tilt",
            only = binary_name,
            live_update = [
                sync(context + "/.tiltbuild/bin/" + binary_name, "/" + binary_name),
                run("sh /restart.sh"),
            ],
        )

        if addon.get("kustomize_config", True):
            # Apply the yaml rendered for this addon by tilt-prepare and group its
            # Deployment under the addon's label in the Tilt UI.
            yaml = read_file("./.tiltbuild/yaml/{}.addon.yaml".format(name))
            k8s_yaml(yaml)
            objs = decode_yaml_stream(yaml)
            k8s_resource(
                workload = find_object_name(objs, "Deployment"),
                new_name = name.lower() + "_addon",
                labels = [label, "ALL.addons"],
            )

        # Apply any additional resources shipped with the addon (kind -> file map)
        # and attach them to the addon's workload in the Tilt UI.
        additional_resources = addon.get("additional_resources", {})
        for resource in additional_resources:
            yaml = read_file(context + "/" + additional_resources[resource])
            k8s_yaml(yaml)
            objs = decode_yaml_stream(yaml)
            k8s_resource(
                workload = name.lower() + "_addon",
                objects = [find_object_qualified_name(objs, resource)],
                resource_deps = ["capi_controller"],
                labels = [label, "ALL.addons"],
            )

def find_object_name(objs, kind):
for o in objs:
if o["kind"] == kind:
Expand Down Expand Up @@ -395,14 +529,31 @@ def prepare_all():
context = context,
)

addons_arg = ""
addon_substitutions = {}
for name in settings.get("deploy_addons", []):
addon = addons.get(name)
if addon == None:
fail("Addon with name " + name + " not found")
if addon.get("kustomize_config", True):
addon_substitutions = addon.get("kustomize_substitutions", {})
context = addon.get("context")
addons_arg = addons_arg + "--addons {name}:{container_name}:{binary_name}:{context} ".format(
name = name,
container_name = addon.get("container_name"),
binary_name = addon.get("binary_name"),
context = context,
)
tilt_settings_file_arg = "--tilt-settings-file " + tilt_file

cmd = "make -B tilt-prepare && ./hack/tools/bin/tilt-prepare {allow_k8s_arg}{tools_arg}{cert_manager_arg}{kustomize_build_arg}{providers_arg}{tilt_settings_file_arg}".format(
os.environ["SERVICE_NAMESPACE"] = addon_substitutions.get("SERVICE_NAMESPACE", "default")
cmd = "make -B tilt-prepare && ./hack/tools/bin/tilt-prepare {allow_k8s_arg}{tools_arg}{cert_manager_arg}{kustomize_build_arg}{providers_arg}{addons_arg}{tilt_settings_file_arg}".format(
allow_k8s_arg = allow_k8s_arg,
tools_arg = tools_arg,
cert_manager_arg = cert_manager_arg,
kustomize_build_arg = kustomize_build_arg,
providers_arg = providers_arg,
addons_arg = addons_arg,
tilt_settings_file_arg = tilt_settings_file_arg,
)
local(cmd, env = settings.get("kustomize_substitutions", {}))
Expand Down Expand Up @@ -515,6 +666,8 @@ include_user_tilt_files()

load_provider_tiltfiles()

load_addon_tiltfiles()

prepare_all()

deploy_provider_crds()
Expand All @@ -523,4 +676,6 @@ deploy_observability()

enable_providers()

deploy_addons()

cluster_templates()
74 changes: 74 additions & 0 deletions hack/tools/tilt-prepare/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ var (
certManagerFlag = pflag.Bool("cert-manager", false, "prepare cert-manager")
kustomizeBuildsFlag = pflag.StringSlice("kustomize-builds", []string{}, "list of kustomize build to be run; each value should be in the form name:path")
providersBuildsFlag = pflag.StringSlice("providers", []string{}, "list of providers to be installed; each value should be in the form name:path")
addonsBuildsFlag = pflag.StringSlice("addons", []string{}, "list of addons to be installed; each value should be in the form name:container_name:binary_name:path")
allowK8SContextsFlag = pflag.StringSlice("allow-k8s-contexts", []string{}, "Specifies that Tilt is allowed to run against the specified k8s context name; Kind is automatically allowed")
tiltSettingsFileFlag = pflag.String("tilt-settings-file", "./tilt-settings.yaml", "Path to a tilt-settings.(json|yaml) file")
)
Expand Down Expand Up @@ -259,6 +260,19 @@ func tiltResources(ctx context.Context, ts *tiltSettings) error {
tasks[name] = providerTask(name, fmt.Sprintf("%s/config/default", path), ts)
}

// Add an addon task for each name/container_name/binary_name/path defined using the --addon flag.
for _, addon := range *addonsBuildsFlag {
addonValues := strings.Split(addon, ":")
if len(addonValues) != 4 {
return errors.Errorf(
"[resources] failed to parse --addon flag %s: value should be in the form of name:container_name:binary_name:path", addon)
}
name := addonValues[0]
containerName := addonValues[1]
binaryName := addonValues[2]
path := addonValues[3]
tasks[name] = addonTask(name, containerName, binaryName, fmt.Sprintf("%s/config/default", path), ts)
}
return runTaskGroup(ctx, "resources", tasks)
}

Expand Down Expand Up @@ -495,6 +509,54 @@ func providerTask(name, path string, ts *tiltSettings) taskFunction {
}
}

// addonTask generates a task for creating the component yaml for an addon and saving the output on a file.
// NOTE: This task has several sub steps including running kustomize and envsubst.
func addonTask(name, containerName, binaryName, path string, ts *tiltSettings) taskFunction {
	return func(ctx context.Context, prefix string, errCh chan error) {
		// Step 1: render the addon's kustomize directory to yaml.
		var kustomizeOut, kustomizeErrBuf bytes.Buffer
		kustomizeCmd := exec.CommandContext(ctx, kustomizePath, "build", path)
		kustomizeCmd.Dir = rootPath
		kustomizeCmd.Stdout = &kustomizeOut
		kustomizeCmd.Stderr = &kustomizeErrBuf
		if err := kustomizeCmd.Run(); err != nil {
			errCh <- errors.Wrapf(err, "[%s] failed to run %s: %s", prefix, kustomizeCmd.Args, kustomizeErrBuf.String())
			return
		}

		// Step 2: substitute environment variables in the rendered yaml.
		var envsubstOut, envsubstErrBuf bytes.Buffer
		envsubstCmd := exec.CommandContext(ctx, envsubstPath)
		envsubstCmd.Dir = rootPath
		envsubstCmd.Stdin = bytes.NewReader(kustomizeOut.Bytes())
		envsubstCmd.Stdout = &envsubstOut
		envsubstCmd.Stderr = &envsubstErrBuf
		if err := envsubstCmd.Run(); err != nil {
			errCh <- errors.Wrapf(err, "[%s] failed to run %s: %s", prefix, envsubstCmd.Args, envsubstErrBuf.String())
			return
		}

		// Step 3: patch the addon Deployment's container command/args.
		objs, err := utilyaml.ToUnstructured(envsubstOut.Bytes())
		if err != nil {
			errCh <- errors.Wrapf(err, "[%s] failed parse components yaml", prefix)
			return
		}
		if err := prepareAddonDeployment(name, containerName, binaryName, prefix, objs, ts); err != nil {
			errCh <- err
			return
		}

		// Step 4: serialize the patched objects and persist them (only if changed,
		// to avoid needless Tiltfile reloads).
		yaml, err := utilyaml.FromUnstructured(objs)
		if err != nil {
			errCh <- errors.Wrapf(err, "[%s] failed convert unstructured objects to yaml", prefix)
			return
		}
		if err := writeIfChanged(prefix, filepath.Join(tiltBuildPath, "yaml", fmt.Sprintf("%s.addon.yaml", name)), yaml); err != nil {
			errCh <- err
		}
	}
}

// writeIfChanged writes yaml to a file if the file does not exist or if the content has changed.
// NOTE: Skipping write in case the content is not changed avoids unnecessary Tiltfile reload.
func writeIfChanged(prefix string, path string, yaml []byte) error {
Expand Down Expand Up @@ -525,6 +587,18 @@ func writeIfChanged(prefix string, path string, yaml []byte) error {
return nil
}

// prepareAddonDeployment rewrites the matching container in the addon's Deployment so it
// starts the synced binary via tilt's start.sh wrapper, appending any extra_args configured
// for this addon in the tilt settings.
func prepareAddonDeployment(name, containerName, binaryName, prefix string, objs []unstructured.Unstructured, ts *tiltSettings) error {
	return updateDeployment(prefix, objs, func(d *appsv1.Deployment) {
		containers := d.Spec.Template.Spec.Containers
		for i := range containers {
			if containers[i].Name != containerName {
				continue
			}
			containers[i].Command = []string{"sh", "/start.sh", "/" + binaryName}
			containers[i].Args = append(containers[i].Args, []string(ts.ExtraArgs[name])...)
		}
	})
}

// prepareManagerDeployment sets the Command and Args for the manager container according to the given tiltSettings.
// If there is a debug config given for the provider, we modify Command and Args to work nicely with the delve debugger.
// If there are extra_args given for the provider, we append those to the ones that already exist in the deployment.
Expand Down
10 changes: 5 additions & 5 deletions test/e2e/cluster_upgrade_runtimesdk.go
Original file line number Diff line number Diff line change
Expand Up @@ -313,7 +313,7 @@ func extensionConfig(specName string, namespace *corev1.Namespace) *runtimev1.Ex
func responsesConfigMap(name string, namespace *corev1.Namespace) *corev1.ConfigMap {
return &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-hookresponses", name),
Name: fmt.Sprintf("test-extension-hookresponses", name),
Namespace: namespace.Name,
},
// Set the initial preloadedResponses for each of the tested hooks.
Expand All @@ -338,7 +338,7 @@ func checkLifecycleHookResponses(ctx context.Context, c client.Client, namespace
for hookName, expectedResponse := range expectedHookResponses {
actualResponse, ok := responseData[hookName+"-actualResponseStatus"]
if !ok {
return errors.Errorf("hook %s call not recorded in configMap %s", hookName, klog.KRef(namespace, clusterName+"-hookresponses"))
return errors.Errorf("hook %s call not recorded in configMap %s/%s", hookName, namespace, "test-extension-hookresponses")
}
if expectedResponse != "" && expectedResponse != actualResponse {
return errors.Errorf("hook %s was expected to be %s in configMap got %s", hookName, expectedResponse, actualResponse)
Expand All @@ -352,15 +352,15 @@ func checkLifecycleHooksCalledAtLeastOnce(ctx context.Context, c client.Client,
responseData := getLifecycleHookResponsesFromConfigMap(ctx, c, namespace, clusterName)
for _, hookName := range expectedHooks {
if _, ok := responseData[hookName+"-actualResponseStatus"]; !ok {
return errors.Errorf("hook %s call not recorded in configMap %s", hookName, klog.KRef(namespace, clusterName+"-hookresponses"))
return errors.Errorf("hook %s call not recorded in configMap %s/%s", hookName, namespace, "test-extension-hookresponses")
}
}
return nil
}

func getLifecycleHookResponsesFromConfigMap(ctx context.Context, c client.Client, namespace string, clusterName string) map[string]string {
configMap := &corev1.ConfigMap{}
configMapName := clusterName + "-hookresponses"
configMapName := "test-extension-hookresponses"
Eventually(func() error {
return c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap)
}).Should(Succeed(), "Failed to get the hook response configmap")
Expand Down Expand Up @@ -473,7 +473,7 @@ func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clu
// Patch the ConfigMap to set the hook response to "Success".
Byf("Setting %s response to Status:Success to unblock the reconciliation", hookName)

configMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: clusterName + "-hookresponses", Namespace: namespace}}
configMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-extension-hookresponses", Namespace: namespace}}
Eventually(func() error {
return c.Get(ctx, util.ObjectKey(configMap), configMap)
}).Should(Succeed(), "Failed to get ConfigMap %s", klog.KObj(configMap))
Expand Down
19 changes: 19 additions & 0 deletions test/extension/extensionconfig.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# ExtensionConfig registering the test-extension Runtime SDK server with Cluster API.
apiVersion: runtime.cluster.x-k8s.io/v1alpha1
kind: ExtensionConfig
metadata:
  annotations:
    # Inject the CA bundle from the webhook service cert secret in "default".
    runtime.cluster.x-k8s.io/inject-ca-from-secret: default/webhook-service-cert
  name: test-extension
  namespace: default
spec:
  clientConfig:
    # Reach the extension through the in-cluster webhook service.
    service:
      name: webhook-service
      namespace: default
      port: 443
  # Select only namespaces named "default" for this extension.
  namespaceSelector:
    matchExpressions:
    - key: kubernetes.io/metadata.name
      operator: In
      values:
      - default
Loading

0 comments on commit 811037e

Please sign in to comment.