Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix bugs which prevented upgrades from v1.0+ to v1.12 #8741

Merged
merged 16 commits into from
Jul 17, 2020
Merged
Show file tree
Hide file tree
Changes from 15 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 8 additions & 1 deletion cmd/minikube/cmd/start.go
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,10 @@ func runStart(cmd *cobra.Command, args []string) {
exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err})
}

if existing != nil {
upgradeExistingConfig(existing)
}

validateSpecifiedDriver(existing)
validateKubernetesVersion(existing)
ds, alts, specified := selectDriver(existing)
Expand Down Expand Up @@ -562,7 +566,10 @@ func hostDriver(existing *config.ClusterConfig) string {
machineName := driver.MachineName(*existing, cp)
h, err := api.Load(machineName)
if err != nil {
glog.Warningf("selectDriver api.Load: %v", err)
glog.Warningf("api.Load failed for %s: %v", machineName, err)
if existing.VMDriver != "" {
return existing.VMDriver
}
return existing.Driver
}

Expand Down
26 changes: 26 additions & 0 deletions cmd/minikube/cmd/start_flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -337,6 +337,8 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
}
}

glog.Infof("config:\n%+v", cc)

r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime})
if err != nil {
return cc, config.Node{}, errors.Wrap(err, "new runtime manager")
Expand All @@ -355,9 +357,33 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
return createNode(cc, kubeNodeName, existing)
}

// upgradeExistingConfig migrates values from legacy configuration fields so
// that profiles written by older minikube releases remain loadable. It is a
// no-op for nil or already-current configs.
func upgradeExistingConfig(cc *config.ClusterConfig) {
	if cc == nil {
		return
	}

	// Pre-driver-rename configs stored the driver under VMDriver.
	if cc.Driver == "" && cc.VMDriver != "" {
		glog.Infof("config upgrade: Driver=%s", cc.VMDriver)
		cc.Driver = cc.VMDriver
	}

	// Very old configs carried no profile name at all.
	if cc.Name == "" {
		name := ClusterFlagValue()
		glog.Infof("config upgrade: Name=%s", name)
		cc.Name = name
	}

	// Configs that predate KIC have no base image; fall back to the flag
	// default (kic.BaseImage).
	if cc.KicBaseImage == "" {
		img := viper.GetString(kicBaseImage)
		glog.Infof("config upgrade: KicBaseImage=%s", img)
		cc.KicBaseImage = img
	}
}

// updateExistingConfigFromFlags will update the existing config from the flags - used on a second start
// skipping updating existing docker env , docker opt, InsecureRegistry, registryMirror, extra-config, apiserver-ips
func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterConfig) config.ClusterConfig { //nolint to suppress cyclomatic complexity 45 of func `updateExistingConfigFromFlags` is high (> 30)

validateFlags(cmd, existing.Driver)

cc := *existing
Expand Down
1 change: 1 addition & 0 deletions pkg/minikube/config/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ type ClusterConfig struct {
Memory int
CPUs int
DiskSize int
VMDriver string // Legacy use only
Driver string
HyperkitVpnKitSock string // Only used by the Hyperkit driver
HyperkitVSockPorts []string // Only used by the Hyperkit driver
Expand Down
35 changes: 33 additions & 2 deletions pkg/minikube/localpath/localpath.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"strings"

"github.com/golang/glog"
"github.com/otiai10/copy"
"github.com/pkg/errors"
"k8s.io/client-go/util/homedir"
)
Expand Down Expand Up @@ -62,12 +63,42 @@ func Profile(name string) string {

// ClientCert returns client certificate path, used by kubeconfig.
// If the per-profile certificate does not yet exist but a legacy
// top-level one does, the legacy file is migrated into the profile
// directory first.
func ClientCert(name string) string {
	current := filepath.Join(Profile(name), "client.crt")
	if _, err := os.Stat(current); err == nil {
		return current
	}

	// minikube v1.5.x stored the client cert directly under MiniPath();
	// copy it into the per-profile location when found.
	legacy := filepath.Join(MiniPath(), "client.crt")
	if _, err := os.Stat(legacy); err == nil {
		glog.Infof("copying %s -> %s", legacy, current)
		if err := copy.Copy(legacy, current); err != nil {
			glog.Errorf("failed copy %s -> %s: %v", legacy, current, err)
			// The copy failed, so the legacy path is the only usable one.
			return legacy
		}
	}

	return current
}

// ClientKey returns the client key path, used by kubeconfig.
// If the per-profile key does not yet exist but a legacy top-level
// one does, the legacy file is migrated into the profile directory
// first.
func ClientKey(name string) string {
	current := filepath.Join(Profile(name), "client.key")
	if _, err := os.Stat(current); err == nil {
		return current
	}

	// minikube v1.5.x stored the client key directly under MiniPath();
	// copy it into the per-profile location when found.
	legacy := filepath.Join(MiniPath(), "client.key")
	if _, err := os.Stat(legacy); err == nil {
		glog.Infof("copying %s -> %s", legacy, current)
		if err := copy.Copy(legacy, current); err != nil {
			glog.Errorf("failed copy %s -> %s: %v", legacy, current, err)
			// The copy failed, so the legacy path is the only usable one.
			return legacy
		}
	}

	return current
}

// CACert returns the minikube CA certificate shared between profiles
Expand Down
1 change: 0 additions & 1 deletion pkg/minikube/machine/start.go
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,6 @@ func showHostInfo(cfg config.ClusterConfig) {

// AddHostAlias makes fine adjustments to pod resources that aren't possible via kubeadm config.
func AddHostAlias(c command.Runner, name string, ip net.IP) error {
glog.Infof("checking")
record := fmt.Sprintf("%s\t%s", ip, name)
if _, err := c.RunCmd(exec.Command("grep", record+"$", "/etc/hosts")); err == nil {
return nil
Expand Down
190 changes: 162 additions & 28 deletions test/integration/version_upgrade_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,64 +28,149 @@ import (
"testing"
"time"

"github.com/docker/machine/libmachine/state"

"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/util/retry"

"github.com/docker/machine/libmachine/state"
"github.com/hashicorp/go-getter"
pkgutil "k8s.io/minikube/pkg/util"
)

// TestVersionUpgrade downloads the latest version of minikube and runs with
// the oldest supported k8s version and then runs the current head minikube
// and tries to upgrade from the oldest supported k8s to newest supported k8s
func TestVersionUpgrade(t *testing.T) {
func installRelease(version string) (f *os.File, err error) {
tf, err := ioutil.TempFile("", fmt.Sprintf("minikube-%s.*.exe", version))
if err != nil {
return tf, err
}
tf.Close()

url := pkgutil.GetBinaryDownloadURL(version, runtime.GOOS)

if err := retry.Expo(func() error { return getter.GetFile(tf.Name(), url) }, 3*time.Second, Minutes(3)); err != nil {
return tf, err
}

if runtime.GOOS != "windows" {
if err := os.Chmod(tf.Name(), 0700); err != nil {
return tf, err
}
}

return tf, nil
}

// legacyStartArgs returns the arguments normally used for starting older versions of minikube
func legacyStartArgs() []string {
	// Older releases used --vm-driver rather than --driver.
	translated := strings.ReplaceAll(*startArgs, "--driver", "--vm-driver")
	return strings.Split(translated, " ")
}

// TestRunningBinaryUpgrade does an upgrade test on a running cluster
func TestRunningBinaryUpgrade(t *testing.T) {
MaybeParallel(t)
profile := UniqueProfileName("vupgrade")
profile := UniqueProfileName("running-upgrade")
ctx, cancel := context.WithTimeout(context.Background(), Minutes(55))

defer CleanupWithLogs(t, profile, cancel)

tf, err := ioutil.TempFile("", "minikube-release.*.exe")
// Should be a version from the last 6 months
legacyVersion := "v1.6.2"
if KicDriver() {
// v1.8.0 would be selected, but: https://github.com/kubernetes/minikube/issues/8740
legacyVersion = "v1.9.0"
}

tf, err := installRelease(legacyVersion)
if err != nil {
t.Fatalf("tempfile: %v", err)
t.Fatalf("%s release installation failed: %v", legacyVersion, err)
}
defer os.Remove(tf.Name())
tf.Close()

url := pkgutil.GetBinaryDownloadURL("latest", runtime.GOOS)
if err := retry.Expo(func() error { return getter.GetFile(tf.Name(), url) }, 3*time.Second, Minutes(3)); err != nil {
t.Fatalf("get failed: %v", err)
args := append([]string{"start", "-p", profile, "--memory=2200"}, legacyStartArgs()...)
rr := &RunResult{}
r := func() error {
rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...))
return err
}

if runtime.GOOS != "windows" {
if err := os.Chmod(tf.Name(), 0700); err != nil {
t.Errorf("chmod: %v", err)
}
// Retry up to two times, to allow flakiness for the legacy release
if err := retry.Expo(r, 1*time.Second, Minutes(30), 2); err != nil {
t.Fatalf("legacy %s start failed: %v", legacyVersion, err)
}

args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...)
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Fatalf("upgrade from %s to HEAD failed: %s: %v", legacyVersion, rr.Command(), err)
}
}

// TestStoppedBinaryUpgrade does an upgrade test on a stopped cluster:
// it starts a cluster with an old minikube release, stops it with that
// same release, then starts it again with the HEAD binary.
func TestStoppedBinaryUpgrade(t *testing.T) {
	MaybeParallel(t)
	profile := UniqueProfileName("stopped-upgrade")
	ctx, cancel := context.WithTimeout(context.Background(), Minutes(55))

	defer CleanupWithLogs(t, profile, cancel)

	// Guarantee stopped upgrade compatibility from a release that is at least 1 year old
	// NOTE: <v1.4.0 does not automatically install a hyperkit/KVM driver
	legacyVersion := "v1.0.0"

	if KicDriver() {
		// first release with non-experimental KIC
		legacyVersion = "v1.8.0"
	}

	tf, err := installRelease(legacyVersion)
	if err != nil {
		t.Fatalf("%s release installation failed: %v", legacyVersion, err)
	}
	defer os.Remove(tf.Name())

	args := append([]string{"start", "-p", profile, "--memory=2200"}, legacyStartArgs()...)
	rr := &RunResult{}
	r := func() error {
		rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...))
		return err
	}

	// Retry up to two times, to allow flakiness for the legacy release
	if err := retry.Expo(r, 1*time.Second, Minutes(30), 2); err != nil {
		t.Fatalf("legacy %s start failed: %v", legacyVersion, err)
	}

	// Stop with the legacy binary so the upgrade happens against a
	// fully stopped cluster.
	rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "-p", profile, "stop"))
	if err != nil {
		t.Errorf("failed to stop cluster: %s: %v", rr.Command(), err)
	}

	args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...)
	rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
	if err != nil {
		t.Fatalf("upgrade from %s to HEAD failed: %s: %v", legacyVersion, rr.Command(), err)
	}
}

// TestKubernetesUpgrade upgrades Kubernetes from oldest to newest
func TestKubernetesUpgrade(t *testing.T) {
MaybeParallel(t)
profile := UniqueProfileName("kubernetes-upgrade")
ctx, cancel := context.WithTimeout(context.Background(), Minutes(55))

rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "stop", "-p", profile))
defer CleanupWithLogs(t, profile, cancel)

args := append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("failed to start minikube HEAD with oldest k8s version: %s: %v", rr.Command(), err)
}

rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile))
if err != nil {
t.Fatalf("%s failed: %v", rr.Command(), err)
}

rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "-p", profile, "status", "--format={{.Host}}"))
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--format={{.Host}}"))
if err != nil {
t.Logf("status error: %v (may be ok)", err)
}
Expand All @@ -98,7 +183,7 @@ func TestVersionUpgrade(t *testing.T) {
args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("failed to start minikube HEAD with newest k8s version. args: %s : %v", rr.Command(), err)
t.Errorf("failed to upgrade with newest k8s version. args: %s : %v", rr.Command(), err)
}

s, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "version", "--output=json"))
Expand All @@ -122,15 +207,64 @@ func TestVersionUpgrade(t *testing.T) {

t.Logf("Attempting to downgrade Kubernetes (should fail)")
args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion)}, StartArgs()...)
if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil {
if rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)); err == nil {
t.Fatalf("downgrading Kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Command())
}

t.Logf("Attempting restart after unsuccessful downgrade")
args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("start after failed upgrade: %v", err)
t.Errorf("start after failed upgrade: %s: %v", rr.Command(), err)
}
}

// TestMissingContainerUpgrade tests a Docker upgrade where the underlying container is missing
tstromberg marked this conversation as resolved.
Show resolved Hide resolved
// TestMissingContainerUpgrade tests a Docker upgrade where the underlying
// container has been stopped and removed out-of-band (via `docker stop` /
// `docker rm`), then verifies that a HEAD `start` succeeds anyway.
func TestMissingContainerUpgrade(t *testing.T) {
	// Only meaningful for the Docker driver, where the node is a container.
	if !DockerDriver() {
		t.Skipf("This test is only for Docker")
	}

	MaybeParallel(t)
	profile := UniqueProfileName("missing-upgrade")
	ctx, cancel := context.WithTimeout(context.Background(), Minutes(55))

	defer CleanupWithLogs(t, profile, cancel)

	legacyVersion := "v1.9.1"

	tf, err := installRelease(legacyVersion)
	if err != nil {
		t.Fatalf("%s release installation failed: %v", legacyVersion, err)
	}
	defer os.Remove(tf.Name())

	args := append([]string{"start", "-p", profile, "--memory=2200"}, StartArgs()...)
	rr := &RunResult{}
	r := func() error {
		rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...))
		return err
	}

	// Retry up to two times, to allow flakiness for the previous release
	if err := retry.Expo(r, 1*time.Second, Minutes(30), 2); err != nil {
		t.Fatalf("release start failed: %v", err)
	}

	// Simulate the container going missing behind minikube's back.
	rr, err = Run(t, exec.CommandContext(ctx, "docker", "stop", profile))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Command(), err)
	}

	rr, err = Run(t, exec.CommandContext(ctx, "docker", "rm", profile))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Command(), err)
	}

	// A start with the HEAD binary should recover rather than error out.
	args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...)
	rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
	if err != nil {
		t.Errorf("failed missing container upgrade from %s. args: %s : %v", legacyVersion, rr.Command(), err)
	}
}
}

Expand Down