From eb0790bb3de68881940b082f8a90e30943d5ece9 Mon Sep 17 00:00:00 2001 From: David Steiman Date: Thu, 25 Jan 2018 03:33:00 +0100 Subject: [PATCH] adds rudimentary cluster management. This version already can successfully create a running multi-node cluster --- Gopkg.toml | 3 + README.md | 41 +++++++- cmd/clusterCreate.go | 231 ++++++++++++++++++++++++++++++++++++++++++- cmd/config.go | 66 ++++++++----- cmd/types.go | 60 +++++++++++ cmd/util.go | 118 ++++++++++++++++++++++ 6 files changed, 490 insertions(+), 29 deletions(-) create mode 100644 Gopkg.toml create mode 100644 cmd/types.go create mode 100644 cmd/util.go diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 00000000..6413c361 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,3 @@ +[[constraint]] + branch = "master" + name = "github.com/thcyron/uiprogress" diff --git a/README.md b/README.md index be981393..29155ed1 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,43 @@ -# hetzner-kube +# hetzner-kube: fast and easy setup of kubernetes clusters on Hetzner Cloud This project contains a CLI tool to easily provision [kubernetes](https://kubernetes.io) clusters on [Hetzner Cloud](https://hetzner.com/cloud). -This is my very first tool written in Go. Usage and docs will come soon. \ No newline at end of file +This is my very first tool written in Go. + +## How to install + +Currently, the only way is + +``` +$ go get -u github.com/xetys/hetzner-kube +``` + +## Usage + +In your [Hetzner Console](https://console.hetzner.cloud) generate an API token and + +``` +$ hetzner-kube context add my-project +Token: +``` + +Then you need to add an SSH key: + +``` +$ hetzner-kube ssh-key add -n my-key +``` + +This assumes, you already have a SSH keypair `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub` + +And finally you can create a cluster by running: + +``` +$ hetzner-kube cluster create --name my-cluster --nodes 3 --ssh-key my-key + +``` + +This will provision a brand new kubernetes cluster in latest version! 
+ + +More info will come as more development happens here... diff --git a/cmd/clusterCreate.go b/cmd/clusterCreate.go index f1d654dc..fe0911af 100644 --- a/cmd/clusterCreate.go +++ b/cmd/clusterCreate.go @@ -17,6 +17,11 @@ package cmd import ( "github.com/spf13/cobra" "log" + "errors" + "fmt" + "github.com/hetznercloud/hcloud-go/hcloud" + "strings" + "time" ) // clusterCreateCmd represents the clusterCreate command @@ -29,11 +34,230 @@ and usage of using your command. For example: Cobra is a CLI library for Go that empowers applications. This application is a tool to generate the needed files to quickly create a Cobra application.`, + PreRunE: validateClusterCreateFlags, Run: func(cmd *cobra.Command, args []string) { - log.Fatalln("not implemented!") + + nodeCount, _ := cmd.Flags().GetInt("nodes") + workerCount := nodeCount - 1 + clusterName, _ := cmd.Flags().GetString("name") + sshKeyName, _ := cmd.Flags().GetString("ssh-key") + masterServerType, _ := cmd.Flags().GetString("master-server-type") + workerServerType, _ := cmd.Flags().GetString("worker-server-type") + cluster := Cluster{Name: clusterName} + + if err := cluster.CreateMasterNodes(Node{SSHKeyName: sshKeyName, IsMaster: true, Type: masterServerType}, 1); err != nil { + log.Println(err) + } + + if workerCount > 0 { + if err := cluster.CreateWorkerNodes(Node{SSHKeyName: sshKeyName, IsMaster: false, Type: workerServerType}, workerCount); err != nil { + log.Fatal(err) + } + } + + log.Println("sleep for 10s...") + time.Sleep(10 * time.Second) + + // provision nodes + if err := cluster.ProvisionNodes(); err != nil { + log.Fatal(err) + } + + // install master + if err := cluster.InstallMaster(); err != nil { + log.Fatal(err) + } + + // install worker + if err := cluster.InstallWorkers(); err != nil { + log.Fatal(err) + } + + log.Println("Cluster successfully created!") + + AppConf.Config.AddCluster(cluster) + AppConf.Config.WriteCurrentConfig() }, } +func (cluster *Cluster) CreateNodes(suffix string, template 
Node, count int) error { + sshKey, _, err := AppConf.Client.SSHKey.Get(AppConf.Context, template.SSHKeyName) + + if err != nil { + return err + } + + serverNameTemplate := fmt.Sprintf("%s-%s-@idx", cluster.Name, suffix) + serverOptsTemplate := hcloud.ServerCreateOpts{ + Name: serverNameTemplate, + ServerType: &hcloud.ServerType{ + Name: template.Type, + }, + Image: &hcloud.Image{ + Name: "ubuntu-16.04", + }, + } + + serverOptsTemplate.SSHKeys = append(serverOptsTemplate.SSHKeys, sshKey) + + for i := 1; i <= count; i++ { + var serverOpts hcloud.ServerCreateOpts + serverOpts = serverOptsTemplate + serverOpts.Name = strings.Replace(serverNameTemplate, "@idx", fmt.Sprintf("%.02d", i), 1) + + // create + server, err := runCreateServer(&serverOpts) + + if err != nil { + return err + } + + ipAddress := server.Server.PublicNet.IPv4.IP.String() + log.Printf("Created node '%s' with IP %s", server.Server.Name, ipAddress) + cluster.Nodes = append(cluster.Nodes, Node{ + Name: serverOpts.Name, + Type: serverOpts.ServerType.Name, + IsMaster: template.IsMaster, + IPAddress: ipAddress, + SSHKeyName: template.SSHKeyName, + }) + } + + return nil +} + +func runCreateServer(opts *hcloud.ServerCreateOpts) (*hcloud.ServerCreateResult, error) { + + log.Printf("creating server '%s'...", opts.Name) + result, _, err := AppConf.Client.Server.Create(AppConf.Context, *opts) + if err != nil { + if err.(hcloud.Error).Code == "uniqueness_error" { + server, _, err := AppConf.Client.Server.Get(AppConf.Context, opts.Name) + + if err != nil { + return nil, err + } + + log.Printf("loading server '%s'...", opts.Name) + return &hcloud.ServerCreateResult{Server: server}, nil + } + + return nil, err + } + + if err := AppConf.ActionProgress(AppConf.Context, result.Action); err != nil { + return nil, err + } + + return &result, nil +} + +func (cluster *Cluster) CreateMasterNodes(template Node, count int) error { + log.Println("creating master nodes...") + return cluster.CreateNodes("master", template, count) 
+} + +func (cluster *Cluster) CreateWorkerNodes(template Node, count int) error { + return cluster.CreateNodes("worker", template, count) +} + +func (cluster *Cluster) ProvisionNodes() error { + for _, node := range cluster.Nodes { + log.Printf("installing docker.io and kubeadm on node '%s'...", node.Name) + _, err := runCmd(node, "wget -cO- https://gist.githubusercontent.com/xetys/0ecfa01790debb2345c0883418dcc7c4/raw/403b6cdea6b78bc5b7209acfa3dfa810dd5f89ba/ubuntu16-kubeadm | bash -") + + if err != nil { + return err + } + } + + return nil +} +func (cluster *Cluster) InstallMaster() error { + commands := []string{ + "swapoff -a", + "kubeadm init --pod-network-cidr=192.168.0.0/16", + "mkdir -p $HOME/.kube", + "cp -i /etc/kubernetes/admin.conf $HOME/.kube/config", + "chown $(id -u):$(id -g) $HOME/.kube/config", + "kubectl apply -f https://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml", + } + for _, node := range cluster.Nodes { + if node.IsMaster { + if len(cluster.Nodes) == 1 { + commands = append(commands, "kubectl taint nodes --all node-role.kubernetes.io/master-") + } + + for _, command := range commands { + _, err := runCmd(node, command) + if err != nil { + return err + } + } + + break + } + } + + return nil +} + +func (cluster *Cluster) InstallWorkers() error { + var joinCommand string + // find master + for _, node := range cluster.Nodes { + if node.IsMaster { + output, err := runCmd(node, "kubeadm token create --print-join-command") + if err != nil { + return err + } + joinCommand = output + break + } + } + + // now let the nodes join + + for _, node := range cluster.Nodes { + if !node.IsMaster { + _, err := runCmd(node, "swapoff -a && "+joinCommand) + if err != nil { + return err + } + } + } + + return nil +} + +func validateClusterCreateFlags(cmd *cobra.Command, args []string) error { + + var ( + name, ssh_key, master_server_type, worker_server_type string + ) + if name, _ = 
cmd.Flags().GetString("name"); name == "" { + return errors.New("flag --name is required") + } + + if ssh_key, _ = cmd.Flags().GetString("ssh-key"); ssh_key == "" { + return errors.New("flag --ssh-key is required") + } + + if master_server_type, _ = cmd.Flags().GetString("master-server-type"); master_server_type == "" { + return errors.New("flag --master-server-type is required") + } + + if worker_server_type, _ = cmd.Flags().GetString("worker-server-type"); worker_server_type == "" { + return errors.New("flag --worker-server-type is required") + } + + if index, _ := AppConf.Config.FindSSHKeyByName(ssh_key); index == -1 { + return errors.New(fmt.Sprintf("SSH key '%s' not found", ssh_key)) + } + + return nil +} + func init() { clusterCmd.AddCommand(clusterCreateCmd) @@ -45,5 +269,10 @@ func init() { // Cobra supports local flags which will only run when this command // is called directly, e.g.: + clusterCreateCmd.Flags().String("name", "", "Name of the cluster") + clusterCreateCmd.Flags().String("ssh-key", "", "Name of the SSH key used for provisioning") + clusterCreateCmd.Flags().String("master-server-type", "cx11", "Server type used for masters") + clusterCreateCmd.Flags().String("worker-server-type", "cx11", "Server type used for workers") + clusterCreateCmd.Flags().Bool("self-hosted", false, "If true, the kubernetes control plane will be hosted on itself") clusterCreateCmd.Flags().IntP("nodes", "n", 2, "Number of nodes for the cluster") } diff --git a/cmd/config.go b/cmd/config.go index d5c8f127..77e11d42 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -11,34 +11,12 @@ import ( "os" "os/user" "path/filepath" + "github.com/go-kit/kit/log/term" + "github.com/thcyron/uiprogress" ) -type HetznerContext struct { - Token string `json:"token"` - Name string `json:"name"` -} - -type SSHKey struct { - Name string `json:"name"` - PrivateKeyPath string `json:"private_key_path"` - PublicKeyPath string `json:"public_key_path"` -} - -type HetznerConfig struct { - 
ActiveContextName string `json:"active_context_name"` - Contexts []HetznerContext `json:"contexts"` - SSHKeys []SSHKey `json:"ssh_keys"` -} - -type AppConfig struct { - Client *hcloud.Client - Context context.Context - CurrentContext *HetznerContext - Config *HetznerConfig -} var DefaultConfigPath string -var Config HetznerConfig var AppConf AppConfig = AppConfig{} func (config HetznerConfig) WriteCurrentConfig() { @@ -54,8 +32,6 @@ func (config HetznerConfig) WriteCurrentConfig() { } else { log.Fatal(err) } - - Config = config } func (config *HetznerConfig) AddContext(context HetznerContext) { @@ -89,6 +65,17 @@ func (config *HetznerConfig) FindSSHKeyByName(name string) (int, *SSHKey) { return index, nil } +func (config *HetznerConfig) AddCluster(cluster Cluster) { + for i, v := range config.Clusters { + if v.Name == cluster.Name { + config.Clusters[i] = cluster + return + } + } + + config.Clusters = append(config.Clusters, cluster) +} + func (app *AppConfig) SwitchContextByName(name string) error { ctx, err := app.FindContextByName(name) @@ -121,6 +108,33 @@ func (app *AppConfig) FindContextByName(name string) (*HetznerContext, error) { return nil, errors.New(fmt.Sprintf("context '%s' not found", name)) } +func (app *AppConfig) ActionProgress(ctx context.Context, action *hcloud.Action) error { + errCh, progressCh := waitAction(ctx, app.Client, action) + + if term.IsTerminal(os.Stdout){ + progress := uiprogress.New() + + progress.Start() + bar := progress.AddBar(100).AppendCompleted().PrependElapsed() + bar.Empty = ' ' + + for { + select { + case err := <-errCh: + if err == nil { + bar.Set(100) + } + progress.Stop() + return err + case p := <-progressCh: + bar.Set(p) + } + } + } else { + return <-errCh + } +} + func (app *AppConfig) assertActiveContext() error { if app.CurrentContext == nil { return errors.New("no context selected") diff --git a/cmd/types.go b/cmd/types.go new file mode 100644 index 00000000..840e0c2a --- /dev/null +++ b/cmd/types.go @@ -0,0 +1,60 
@@ +package cmd + +import ( + "github.com/hetznercloud/hcloud-go/hcloud" + "context" +) + +type HetznerContext struct { + Token string `json:"token"` + Name string `json:"name"` +} + +type SSHKey struct { + Name string `json:"name"` + PrivateKeyPath string `json:"private_key_path"` + PublicKeyPath string `json:"public_key_path"` +} + +type Node struct { + Name string `json:"name"` + Type string `json:"type"` + IsMaster bool `json:"is_master"` + IPAddress string `json:"ip_address"` + SSHKeyName string `json:"ssh_key_name"` +} + +type Cluster struct { + Name string `json:"name"` + Nodes []Node `json:"nodes"` + SelfHosted bool `json:"self_hosted"` +} + + +type ClusterManager interface { + CreateMasterNodes(template Node, count int) error + CreateWorkerNodes(template Node, count int) error + ProvisionNodes() error + InstallMaster() + InstallWorkers() + GetKubeconfig() + +} + +type SSHClient interface { + RunCmd(node *Node, cmd string) +} + +type HetznerConfig struct { + ActiveContextName string `json:"active_context_name"` + Contexts []HetznerContext `json:"contexts"` + SSHKeys []SSHKey `json:"ssh_keys"` + Clusters []Cluster `json:"clusters"` +} + +type AppConfig struct { + Client *hcloud.Client + Context context.Context + CurrentContext *HetznerContext + Config *HetznerConfig +} diff --git a/cmd/util.go b/cmd/util.go new file mode 100644 index 00000000..069a8941 --- /dev/null +++ b/cmd/util.go @@ -0,0 +1,118 @@ +package cmd + +import ( + "context" + "github.com/hetznercloud/hcloud-go/hcloud" + "time" + "io/ioutil" + "bytes" + "log" + "golang.org/x/crypto/ssh" + "errors" + "fmt" +) + +func runCmd(node Node, command string) (output string, err error) { + index, privateKey := AppConf.Config.FindSSHKeyByName(node.SSHKeyName) + if index < 0 { + return "", errors.New(fmt.Sprintf("could not find SSH key '%s'", node.SSHKeyName)) + } + + pemBytes, err := ioutil.ReadFile(privateKey.PrivateKeyPath) + if err != nil { + log.Fatal(err) + } + signer, err := 
ssh.ParsePrivateKey(pemBytes) + if err != nil { + log.Fatalf("parse key failed:%v", err) + } + config := &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + var connection *ssh.Client + for try := 0; ; try++ { + connection, err = ssh.Dial("tcp", node.IPAddress+":22", config) + if err != nil { + log.Printf("dial failed:%v", err) + if try > 10 { + return "", err + } + } else { + break + } + time.Sleep(1 * time.Second) + } + defer connection.Close() + log.Println("Connection succeeded!") + session, err := connection.NewSession() + if err != nil { + log.Fatalf("session failed:%v", err) + } + var stdoutBuf bytes.Buffer + var stderrBuf bytes.Buffer + session.Stdout = &stdoutBuf + session.Stderr = &stderrBuf + + err = session.Run(command) + if err != nil { + log.Println(stderrBuf.String()) + log.Fatalf("Run failed:%v", err) + } + log.Printf(">%s", stdoutBuf.String()) + session.Close() + return stdoutBuf.String(), nil +} + +func waitAction(ctx context.Context, client *hcloud.Client, action *hcloud.Action) (<-chan error, <-chan int) { + errCh := make(chan error, 1) + progressCh := make(chan int) + + go func() { + defer close(errCh) + defer close(progressCh) + + ticker := time.NewTicker(100 * time.Millisecond) + + sendProgress := func(p int) { + select { + case progressCh <- p: + break + default: + break + } + } + + for { + select { + case <-ctx.Done(): + errCh <- ctx.Err() + return + case <-ticker.C: + break + } + + action, _, err := client.Action.GetByID(ctx, action.ID) + if err != nil { + errCh <- ctx.Err() + return + } + + switch action.Status { + case hcloud.ActionStatusRunning: + sendProgress(action.Progress) + break + case hcloud.ActionStatusSuccess: + sendProgress(100) + errCh <- nil + return + case hcloud.ActionStatusError: + errCh <- action.Error() + return + } + } + }() + + return errCh, progressCh +}