feat(driver/kubernetes): support mount buildkit.toml and qemu installing
Signed-off-by: Morlay <[email protected]>
morlay committed Jul 21, 2021
1 parent d9ee3b1 commit 58f184f
Showing 9 changed files with 266 additions and 52 deletions.
2 changes: 2 additions & 0 deletions docs/reference/buildx_create.md
@@ -123,6 +123,8 @@ Passes additional driver-specific options. Details for each driver:
```
- `kubernetes`
- `image=IMAGE` - Sets the container image to be used for running buildkit.
- `withqemu=(true|false)` - Install QEMU emulation for multi-platform support. Defaults to `false`.
- `qemuimage=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`.
- `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
- `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
- `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
74 changes: 74 additions & 0 deletions docs/reference/buildx_create_for_multi_arch_build_in_k8s.md
@@ -0,0 +1,74 @@
# Buildx create for multi-arch builds in k8s

## With QEMU emulation

```console
$ KUBECONFIG=${KUBECONFIG} \
docker buildx create \
--name=builder \
--platform=linux/amd64,linux/arm64 \
--driver=kubernetes \
--driver-opt=namespace=buildkit,withqemu=true
```
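
Once the builder exists, it can be bootstrapped and checked; a minimal sketch, assuming the builder name and `buildkit` namespace used above:

```console
$ docker buildx inspect builder --bootstrap
$ kubectl -n buildkit get pods
```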

### Known Issues

QEMU works well for executing foreign binaries, but compiling under emulation is very slow and may crash.

In this mode, if you still want to compile binaries inside Docker, use `FROM --platform=${BUILDPLATFORM}` so that compile stages run without QEMU.

Example for Go:
```dockerfile
FROM --platform=${BUILDPLATFORM} golang:1.16 AS builder
ARG TARGETARCH
RUN GOARCH=${TARGETARCH} go build -o /bin/app-linux-${TARGETARCH} ./path/to/cmd/app

FROM scratch
ARG TARGETARCH
COPY --from=builder /bin/app-linux-${TARGETARCH} /bin/app
```
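
A hypothetical build invocation with this builder (the image name and build context are placeholders):

```console
$ docker buildx build \
    --builder=builder \
    --platform=linux/amd64,linux/arm64 \
    --tag=registry.example.com/app:latest \
    --push .
```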

## With native nodes

```console
# create the builder `builder` with a native x86_64 node
$ KUBECONFIG=${KUBECONFIG} \
docker buildx create \
--name=builder \
--platform=linux/amd64 \
--node=builder-amd64 \
--driver=kubernetes \
--driver-opt=namespace=buildkit,nodeselector="beta.kubernetes.io/arch=amd64"

# append a native aarch64 node to the same builder
$ KUBECONFIG=${KUBECONFIG} \
docker buildx create \
--name=builder --append \
--platform=linux/arm64 \
--node=builder-arm64 \
--driver=kubernetes \
--driver-opt=namespace=buildkit,nodeselector="beta.kubernetes.io/arch=arm64"
```

* `KUBECONFIG` may point to a different cluster for each node.
* When `buildx create` runs from a pod inside a multi-arch cluster, `KUBECONFIG` can be left unset, but make sure the pod's `serviceAccount` can access `deployments,pods,configmaps` in the assigned `namespace` (see the RBAC sketch below).
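
For that in-cluster case, a minimal RBAC sketch (the role and service account names `buildx-role` and `buildx-sa` are hypothetical):

```console
$ kubectl -n buildkit create role buildx-role \
    --verb=get,list,watch,create,update,patch,delete \
    --resource=deployments,pods,configmaps
$ kubectl -n buildkit create rolebinding buildx-role \
    --role=buildx-role \
    --serviceaccount=buildkit:buildx-sa
```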

### Known Issues

In this mode, each architecture is built on a matching native node, so the overall build time may be longer.
Even when `FROM --platform=${BUILDPLATFORM}` is defined, every stage is built once per architecture.

However, the builds are fully native.
Projects that need to build on a native system will benefit from this mode.
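
To see how the nodes and their platforms are registered, and where the BuildKit pods were scheduled, something like the following can help (assuming the `buildkit` namespace from the examples above):

```console
$ docker buildx ls
$ kubectl -n buildkit get pods -o wide
```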

## Tips

Once a builder has been created in Kubernetes, the created deployments are not removed until `buildx rm` is called.
So `RUN --mount=type=cache` can be used to share common caches across different projects.

However, for Node.js users: don't share `npm` or `yarn` global caches (`pnpm` works better here), because restoring those caches may be slower than reinstalling.
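
Because the deployment and its build cache persist, the shared cache can be inspected or pruned from the client side; a sketch, assuming the builder name `builder`:

```console
$ docker buildx du --builder builder
$ docker buildx prune --builder builder
```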
1 change: 1 addition & 0 deletions driver/bkimage/bkimage.go
@@ -2,5 +2,6 @@ package bkimage

const (
DefaultImage = "moby/buildkit:buildx-stable-1" // TODO: make this verified
QemuImage = "tonistiigi/binfmt:latest" // TODO: make this verified
DefaultRootlessImage = DefaultImage + "-rootless"
)
23 changes: 19 additions & 4 deletions driver/kubernetes/driver.go
@@ -18,6 +18,8 @@ import (
"github.com/moby/buildkit/util/tracing/detect"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
@@ -39,15 +41,18 @@ type Driver struct {
factory driver.Factory
minReplicas int
deployment *appsv1.Deployment
configMap *corev1.ConfigMap
clientset *kubernetes.Clientset
deploymentClient clientappsv1.DeploymentInterface
podClient clientcorev1.PodInterface
configMapClient clientcorev1.ConfigMapInterface
podChooser podchooser.PodChooser
}

func (d *Driver) IsMobyDriver() bool {
return false
}

func (d *Driver) Config() driver.InitConfig {
return d.InitConfig
}
@@ -56,10 +61,20 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
_, err := d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{})
if err != nil {
// TODO: return err if err != ErrNotFound
_, err = d.deploymentClient.Create(ctx, d.deployment, metav1.CreateOptions{})
if err != nil {
return errors.Wrapf(err, "error while calling deploymentClient.Create for %q", d.deployment.Name)
if apierrors.IsNotFound(err) {
if d.configMap != nil {
// create the ConfigMap first, if one was provided
_, err = d.configMapClient.Create(ctx, d.configMap, metav1.CreateOptions{})
if err != nil {
return errors.Wrapf(err, "error while calling configMapClient.Create for %q", d.configMap.Name)
}
}
_, err = d.deploymentClient.Create(ctx, d.deployment, metav1.CreateOptions{})
if err != nil {
return errors.Wrapf(err, "error while calling deploymentClient.Create for %q", d.deployment.Name)
}
} else {
return errors.Wrapf(err, "error for bootstrap %q", d.deployment.Name)
}
}
return sub.Wrap(
39 changes: 33 additions & 6 deletions driver/kubernetes/factory.go
@@ -2,6 +2,7 @@ package kubernetes

import (
"context"
"os"
"strconv"
"strings"

@@ -59,25 +60,40 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
if err != nil {
return nil, err
}

d := &Driver{
factory: f,
InitConfig: cfg,
clientset: clientset,
}

deploymentOpt := &manifest.DeploymentOpt{
Name: deploymentName,
Image: bkimage.DefaultImage,
Replicas: 1,
BuildkitFlags: cfg.BuildkitFlags,
Rootless: false,
Platforms: cfg.Platforms,
WithQemu: false,
QemuImage: bkimage.QemuImage,
}

if cfg.ConfigFile != "" {
buildkitConfig, err := os.ReadFile(cfg.ConfigFile)
if err != nil {
return nil, err
}
deploymentOpt.BuildkitConfig = buildkitConfig
}

loadbalance := LoadbalanceSticky
imageOverride := ""

for k, v := range cfg.DriverOpts {
switch k {
case "image":
imageOverride = v
if v != "" {
deploymentOpt.Image = v
}
case "namespace":
namespace = v
case "replicas":
@@ -117,20 +133,31 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
return nil, errors.Errorf("invalid loadbalance %q", v)
}
loadbalance = v
case "withqemu":
deploymentOpt.WithQemu, err = strconv.ParseBool(v)
if err != nil {
return nil, err
}
case "qemuimage":
if v != "" {
deploymentOpt.QemuImage = v
}
default:
return nil, errors.Errorf("invalid driver option %s for driver %s", k, DriverName)
}
}
if imageOverride != "" {
deploymentOpt.Image = imageOverride
}
d.deployment, err = manifest.NewDeployment(deploymentOpt)

d.deployment, d.configMap, err = manifest.NewDeployment(deploymentOpt)
if err != nil {
return nil, err
}

d.minReplicas = deploymentOpt.Replicas

d.deploymentClient = clientset.AppsV1().Deployments(namespace)
d.podClient = clientset.CoreV1().Pods(namespace)
d.configMapClient = clientset.CoreV1().ConfigMaps(namespace)

switch loadbalance {
case LoadbalanceSticky:
d.podChooser = &podchooser.StickyPodChooser{
83 changes: 70 additions & 13 deletions driver/kubernetes/manifest/manifest.go
@@ -12,11 +12,21 @@ import (
)

type DeploymentOpt struct {
Namespace string
Name string
Image string
Replicas int
BuildkitFlags []string
Namespace string
Name string
Image string
Replicas int

// WithQemu
// when true, install QEMU binfmt handlers via an init container
WithQemu bool
QemuImage string

BuildkitFlags []string
// BuildkitConfig
// when not empty, a ConfigMap containing buildkitd.toml is created and mounted
BuildkitConfig []byte

Rootless bool
NodeSelector map[string]string
RequestsCPU string
@@ -31,7 +41,7 @@ const (
AnnotationPlatform = "buildx.docker.com/platform"
)

func NewDeployment(opt *DeploymentOpt) (*appsv1.Deployment, error) {
func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c *corev1.ConfigMap, err error) {
labels := map[string]string{
"app": opt.Name,
}
@@ -44,7 +54,7 @@ func NewDeployment(opt *DeploymentOpt) (*appsv1.Deployment, error) {
annotations[AnnotationPlatform] = strings.Join(platformutil.Format(opt.Platforms), ",")
}

d := &appsv1.Deployment{
d = &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: appsv1.SchemeGroupVersion.String(),
Kind: "Deployment",
@@ -91,9 +101,56 @@
},
},
}

if len(opt.BuildkitConfig) > 0 {
c = &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1.SchemeGroupVersion.String(),
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: opt.Namespace,
Name: opt.Name + "-config",
Annotations: annotations,
},
Data: map[string]string{
"buildkitd.toml": string(opt.BuildkitConfig),
},
}

d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{
Name: "config",
MountPath: "/etc/buildkit",
}}

d.Spec.Template.Spec.Volumes = []corev1.Volume{{
Name: "config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: c.Name,
},
},
},
}}
}

if opt.WithQemu {
d.Spec.Template.Spec.InitContainers = []corev1.Container{
{
Name: "qemu",
Image: opt.QemuImage,
Args: []string{"--install"},
SecurityContext: &corev1.SecurityContext{
Privileged: &privileged,
},
},
}
}

if opt.Rootless {
if err := toRootless(d); err != nil {
return nil, err
return nil, nil, err
}
}

@@ -104,36 +161,36 @@ func NewDeployment(opt *DeploymentOpt) (*appsv1.Deployment, error) {
if opt.RequestsCPU != "" {
reqCPU, err := resource.ParseQuantity(opt.RequestsCPU)
if err != nil {
return nil, err
return nil, nil, err
}
d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = reqCPU
}

if opt.RequestsMemory != "" {
reqMemory, err := resource.ParseQuantity(opt.RequestsMemory)
if err != nil {
return nil, err
return nil, nil, err
}
d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory] = reqMemory
}

if opt.LimitsCPU != "" {
limCPU, err := resource.ParseQuantity(opt.LimitsCPU)
if err != nil {
return nil, err
return nil, nil, err
}
d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU] = limCPU
}

if opt.LimitsMemory != "" {
limMemory, err := resource.ParseQuantity(opt.LimitsMemory)
if err != nil {
return nil, err
return nil, nil, err
}
d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceMemory] = limMemory
}

return d, nil
return
}

func toRootless(d *appsv1.Deployment) error {