diff --git a/pkg/api/resource/helpers.go b/pkg/api/resource/helpers.go index 78084393eb23b..d1a1fae848e35 100644 --- a/pkg/api/resource/helpers.go +++ b/pkg/api/resource/helpers.go @@ -70,6 +70,29 @@ func PodRequestsAndLimits(pod *api.Pod) (reqs map[api.ResourceName]resource.Quan } } } + // defer containers define the minimum of any resource + for _, container := range pod.Spec.DeferContainers { + for name, quantity := range container.Resources.Requests { + value, ok := reqs[name] + if !ok { + reqs[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + reqs[name] = *quantity.Copy() + } + } + for name, quantity := range container.Resources.Limits { + value, ok := limits[name] + if !ok { + limits[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + limits[name] = *quantity.Copy() + } + } + } return } diff --git a/pkg/api/testing/serialization_proto_test.go b/pkg/api/testing/serialization_proto_test.go index 45c90e818a0cb..32dd471337fb3 100644 --- a/pkg/api/testing/serialization_proto_test.go +++ b/pkg/api/testing/serialization_proto_test.go @@ -102,6 +102,9 @@ func TestProtobufRoundTrip(t *testing.T) { // InitContainers are turned into annotations by conversion. obj.Spec.InitContainers = nil obj.Status.InitContainerStatuses = nil + // deferContainers are turned into annotations by conversion + obj.Spec.DeferContainers = nil + obj.Status.DeferContainerStatuses = nil data, err := obj.Marshal() if err != nil { t.Fatal(err) diff --git a/pkg/apis/apps/v1/zz_generated.defaults.go b/pkg/apis/apps/v1/zz_generated.defaults.go index d05eff3c5d6d1..f7e8d45d7b9aa 100644 --- a/pkg/apis/apps/v1/zz_generated.defaults.go +++ b/pkg/apis/apps/v1/zz_generated.defaults.go @@ -176,6 +176,48 @@ func SetObjectDefaults_DaemonSet(in *v1.DaemonSet) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_DaemonSetList(in *v1.DaemonSetList) { @@ -320,6 +362,48 @@ func SetObjectDefaults_Deployment(in *v1.Deployment) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + 
core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_DeploymentList(in *v1.DeploymentList) { @@ -464,6 +548,48 @@ func SetObjectDefaults_ReplicaSet(in *v1.ReplicaSet) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_ReplicaSetList(in *v1.ReplicaSetList) { @@ -608,6 +734,48 @@ func SetObjectDefaults_StatefulSet(in *v1.StatefulSet) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + 
core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } for i := range in.Spec.VolumeClaimTemplates { a := &in.Spec.VolumeClaimTemplates[i] core_v1.SetDefaults_PersistentVolumeClaim(a) diff --git a/pkg/apis/apps/v1beta1/zz_generated.defaults.go b/pkg/apis/apps/v1beta1/zz_generated.defaults.go index 47c74ab2c4aed..6076b45b3638d 100644 --- a/pkg/apis/apps/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/apps/v1beta1/zz_generated.defaults.go @@ -172,6 +172,48 @@ func SetObjectDefaults_Deployment(in *v1beta1.Deployment) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_DeploymentList(in *v1beta1.DeploymentList) { @@ -316,6 +358,48 @@ func SetObjectDefaults_StatefulSet(in *v1beta1.StatefulSet) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } for i := range in.Spec.VolumeClaimTemplates { a := &in.Spec.VolumeClaimTemplates[i] v1.SetDefaults_PersistentVolumeClaim(a) diff --git a/pkg/apis/apps/v1beta2/zz_generated.defaults.go b/pkg/apis/apps/v1beta2/zz_generated.defaults.go index ddf7f6a169e8a..a9a3e65f6634d 100644 --- 
a/pkg/apis/apps/v1beta2/zz_generated.defaults.go +++ b/pkg/apis/apps/v1beta2/zz_generated.defaults.go @@ -176,6 +176,48 @@ func SetObjectDefaults_DaemonSet(in *v1beta2.DaemonSet) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_DaemonSetList(in *v1beta2.DaemonSetList) { @@ -320,6 +362,48 @@ func SetObjectDefaults_Deployment(in *v1beta2.Deployment) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_DeploymentList(in *v1beta2.DeploymentList) { @@ -464,6 +548,48 @@ func SetObjectDefaults_ReplicaSet(in *v1beta2.ReplicaSet) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + 
v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_ReplicaSetList(in *v1beta2.ReplicaSetList) { @@ -608,6 +734,48 @@ func SetObjectDefaults_StatefulSet(in *v1beta2.StatefulSet) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } for i := range in.Spec.VolumeClaimTemplates { a := &in.Spec.VolumeClaimTemplates[i] v1.SetDefaults_PersistentVolumeClaim(a) diff --git a/pkg/apis/batch/v1/zz_generated.defaults.go b/pkg/apis/batch/v1/zz_generated.defaults.go index 976735a1a4efb..44415944ff986 100644 --- a/pkg/apis/batch/v1/zz_generated.defaults.go +++ b/pkg/apis/batch/v1/zz_generated.defaults.go @@ -170,6 +170,48 @@ func SetObjectDefaults_Job(in *v1.Job) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if 
a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_JobList(in *v1.JobList) { diff --git a/pkg/apis/batch/v1beta1/zz_generated.defaults.go b/pkg/apis/batch/v1beta1/zz_generated.defaults.go index e809e80967c6a..db0b0903c48cf 100644 --- a/pkg/apis/batch/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/batch/v1beta1/zz_generated.defaults.go @@ -171,6 +171,48 @@ func SetObjectDefaults_CronJob(in *v1beta1.CronJob) { } } } + for i := range in.Spec.JobTemplate.Spec.Template.Spec.DeferContainers { + a := &in.Spec.JobTemplate.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_CronJobList(in *v1beta1.CronJobList) { @@ -314,4 +356,46 @@ func SetObjectDefaults_JobTemplate(in *v1beta1.JobTemplate) { } } } + for i := range in.Template.Spec.Template.Spec.DeferContainers { + a := &in.Template.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } diff --git a/pkg/apis/batch/v2alpha1/zz_generated.defaults.go b/pkg/apis/batch/v2alpha1/zz_generated.defaults.go index 8983fcddadec2..b1c6a941d752f 100644 --- a/pkg/apis/batch/v2alpha1/zz_generated.defaults.go +++ b/pkg/apis/batch/v2alpha1/zz_generated.defaults.go @@ -171,6 +171,48 @@ func SetObjectDefaults_CronJob(in *v2alpha1.CronJob) { } } } + for i := range in.Spec.JobTemplate.Spec.Template.Spec.DeferContainers { + a 
:= &in.Spec.JobTemplate.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_CronJobList(in *v2alpha1.CronJobList) { @@ -314,4 +356,46 @@ func SetObjectDefaults_JobTemplate(in *v2alpha1.JobTemplate) { } } } + for i := range in.Template.Spec.Template.Spec.DeferContainers { + a := &in.Template.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index bcdc0c288994b..7f4c946255b55 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -2473,6 +2473,8 @@ type PodSpec struct { InitContainers []Container // List of containers belonging to the pod. Containers []Container + // List of termination containers belonging to the pod. + DeferContainers []Container // +optional RestartPolicy RestartPolicy // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. @@ -2717,6 +2719,9 @@ type PodStatus struct { // when we have done this. 
// +optional ContainerStatuses []ContainerStatus + // Similar to init container statuses, but for deferContainers; the only difference is that they will + // remain in the waiting state throughout the pod's execution until the pod is being terminated. + DeferContainerStatuses []ContainerStatus } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/core/v1/conversion.go b/pkg/apis/core/v1/conversion.go index 2e650739db569..1db5fb240e1f7 100644 --- a/pkg/apis/core/v1/conversion.go +++ b/pkg/apis/core/v1/conversion.go @@ -19,6 +19,7 @@ package v1 import ( "fmt" "reflect" + "encoding/json" "k8s.io/api/core/v1" @@ -133,6 +134,9 @@ func addConversionFuncs(scheme *runtime.Scheme) error { // Add non-generated conversion functions err := scheme.AddConversionFuncs( Convert_core_Pod_To_v1_Pod, + Convert_v1_Pod_To_core_Pod, + Convert_core_PodStatusResult_To_v1_PodStatusResult, + Convert_v1_PodStatusResult_To_core_PodStatusResult, Convert_core_PodSpec_To_v1_PodSpec, Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, Convert_core_ServiceSpec_To_v1_ServiceSpec, @@ -349,19 +353,131 @@ func Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in * return nil } +func Convert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { + if err := autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil { + return err + } + + if old := out.Annotations; old != nil { + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + } + + // Now do the same thing for deferContainers too + if len(out.Status.DeferContainerStatuses) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Status.DeferContainerStatuses) + if err != nil { + return err + } + out.Annotations[v1.PodDeferContainersStatusesKey] = string(value) + } else { + delete(out.Annotations, v1.PodDeferContainersStatusesKey) + } + return nil +} + +func Convert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { + // TODO: sometime after we move init container to stable, remove these conversions + // Move the deferContainer annotation to the internal representation field + if value, ok := in.Annotations[v1.PodDeferContainersStatusesKey]; ok { + var values []v1.ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. 
+ in.Status.DeferContainerStatuses = values + } + + if err := autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, v1.PodDeferContainersStatusesKey) + } + return nil +} + func Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { if err := autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { return err } + // TODO: sometime after we move init container to stable, remove these conversions. + // DeferContainers related code + if old := out.Annotations; old != nil { + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + } + if len(out.Spec.DeferContainers) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Spec.DeferContainers) + if err != nil { + return err + } + out.Annotations[v1.PodDeferContainersAnnotationKey] = string(value) + } else { + delete(out.Annotations, v1.PodDeferContainersAnnotationKey) + } + return nil } func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { + + + // Move the defer Container annotation to the internal repr. field + if value, ok := in.Annotations[v1.PodDeferContainersAnnotationKey]; ok { + var values []v1.Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. 
+ in.Spec.DeferContainers = values + + // Call defaulters explicitly until annotations are removed + for i := range in.Spec.DeferContainers { + c := &in.Spec.DeferContainers[i] + SetDefaults_Container(c) + } + } + if err := autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in, out, s); err != nil { return err } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, v1.PodDeferContainersAnnotationKey) + } + return nil } @@ -422,12 +538,79 @@ func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) e for k, v := range old { out.Annotations[k] = v } - delete(out.Annotations, "pod.beta.kubernetes.io/init-containers") - delete(out.Annotations, "pod.alpha.kubernetes.io/init-containers") - delete(out.Annotations, "pod.beta.kubernetes.io/init-container-statuses") - delete(out.Annotations, "pod.alpha.kubernetes.io/init-container-statuses") + //Annotation keys + delete(out.Annotations, v1.PodDeferContainersAnnotationKey) + delete(out.Annotations, v1.PodDeferContainersStatusesKey) + } + + // DeferContainers related code + if len(out.Spec.DeferContainers) > 0 { + value, err := json.Marshal(out.Spec.DeferContainers) + if err != nil { + return err + } + out.Annotations[v1.PodDeferContainersAnnotationKey] = string(value) + } + if len(out.Status.DeferContainerStatuses) > 0 { + value, err := json.Marshal(out.Status.DeferContainerStatuses) + if err != nil { + return err + } + out.Annotations[v1.PodDeferContainersStatusesKey] = string(value) + } + + return nil +} + +func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { + // Annotation conversion for deferContainers + if value, ok := in.Annotations[v1.PodDeferContainersAnnotationKey]; ok { + var values []v1.Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Spec.DeferContainers = values + // Call defaulters explicitly until annotations are removed + for i := range in.Spec.DeferContainers { + c := &in.Spec.DeferContainers[i] + SetDefaults_Container(c) + } } + //DeferContainer related conversion code + if value, ok := in.Annotations[v1.PodDeferContainersStatusesKey]; ok { + var values []v1.ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. 
+ in.Status.DeferContainerStatuses = values + } + + if err := autoConvert_v1_Pod_To_core_Pod(in, out, s); err != nil { + return err + } + + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, v1.PodDeferContainersStatusesKey) + delete(out.Annotations, v1.PodDeferContainersAnnotationKey) + } return nil } diff --git a/pkg/apis/core/v1/zz_generated.conversion.go b/pkg/apis/core/v1/zz_generated.conversion.go index 23d32a02e5d65..e751b516fe4b2 100644 --- a/pkg/apis/core/v1/zz_generated.conversion.go +++ b/pkg/apis/core/v1/zz_generated.conversion.go @@ -3357,11 +3357,6 @@ func autoConvert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scop return nil } -// Convert_v1_Pod_To_core_Pod is an autogenerated conversion function. -func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { - return autoConvert_v1_Pod_To_core_Pod(in, out, s) -} - func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { @@ -3766,6 +3761,17 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s } else { out.Containers = nil } + if in.DeferContainers != nil { + in, out := &in.DeferContainers, &out.DeferContainers + *out = make([]core.Container, len(*in)) + for i := range *in { + if err := Convert_v1_Container_To_core_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.DeferContainers = nil + } out.RestartPolicy = core.RestartPolicy(in.RestartPolicy) out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) @@ -3834,6 +3840,17 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s } else { out.Containers = nil } + if in.DeferContainers != nil { + in, out := &in.DeferContainers, &out.DeferContainers + *out = make([]v1.Container, len(*in)) + for i := range *in { + if err := Convert_core_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.DeferContainers = nil + } out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy) out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) @@ -3874,6 +3891,7 @@ func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodS out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) out.InitContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) out.ContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.DeferContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.DeferContainerStatuses)) out.QOSClass = core.PodQOSClass(in.QOSClass) return nil } @@ -3894,6 +3912,7 @@ func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodS out.QOSClass = v1.PodQOSClass(in.QOSClass) out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.DeferContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.DeferContainerStatuses)) return nil } @@ -3910,11 +3929,6 @@ 
func autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResu return nil } -// Convert_v1_PodStatusResult_To_core_PodStatusResult is an autogenerated conversion function. -func Convert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { - return autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in, out, s) -} - func autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { @@ -3923,11 +3937,6 @@ func autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusRe return nil } -// Convert_core_PodStatusResult_To_v1_PodStatusResult is an autogenerated conversion function. -func Convert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { - return autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in, out, s) -} - func autoConvert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { diff --git a/pkg/apis/core/v1/zz_generated.defaults.go b/pkg/apis/core/v1/zz_generated.defaults.go index c1ad46cd38a66..dc76612559433 100644 --- a/pkg/apis/core/v1/zz_generated.defaults.go +++ b/pkg/apis/core/v1/zz_generated.defaults.go @@ -304,6 +304,48 @@ func SetObjectDefaults_Pod(in *v1.Pod) { } } } + for i := range in.Spec.DeferContainers { + a := &in.Spec.DeferContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_PodList(in *v1.PodList) { @@ -447,6 +489,48 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) { } } } + for i := range in.Template.Spec.DeferContainers { + a := &in.Template.Spec.DeferContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + 
SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_PodTemplateList(in *v1.PodTemplateList) { @@ -592,6 +676,48 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } } diff --git a/pkg/apis/core/zz_generated.deepcopy.go b/pkg/apis/core/zz_generated.deepcopy.go index 8a077470f0f75..869b57eeb6c81 100644 --- a/pkg/apis/core/zz_generated.deepcopy.go +++ b/pkg/apis/core/zz_generated.deepcopy.go @@ -3845,6 +3845,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DeferContainers != nil { + in, out := &in.DeferContainers, &out.DeferContainers + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds if *in == nil { @@ -3980,6 +3987,13 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DeferContainerStatuses != nil { + in, out := &in.DeferContainerStatuses, &out.DeferContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/pkg/apis/extensions/v1beta1/zz_generated.defaults.go b/pkg/apis/extensions/v1beta1/zz_generated.defaults.go index f22282f85f8e1..bced51c7597ad 100644 --- a/pkg/apis/extensions/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/extensions/v1beta1/zz_generated.defaults.go @@ -178,6 +178,48 @@ func SetObjectDefaults_DaemonSet(in *v1beta1.DaemonSet) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := 
&a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_DaemonSetList(in *v1beta1.DaemonSetList) { @@ -322,6 +364,48 @@ func SetObjectDefaults_Deployment(in *v1beta1.Deployment) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_DeploymentList(in *v1beta1.DeploymentList) { @@ -488,6 +572,48 @@ func SetObjectDefaults_ReplicaSet(in *v1beta1.ReplicaSet) { } } } + for i := range in.Spec.Template.Spec.DeferContainers { + a := &in.Spec.Template.Spec.DeferContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != 
nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } } func SetObjectDefaults_ReplicaSetList(in *v1beta1.ReplicaSetList) { diff --git a/pkg/kubelet/apis/cri/services.go b/pkg/kubelet/apis/cri/services.go index 9c8ba0899c32b..bd53b80418a09 100644 --- a/pkg/kubelet/apis/cri/services.go +++ b/pkg/kubelet/apis/cri/services.go @@ -52,6 +52,8 @@ type ContainerManager interface { Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) // Attach prepares a streaming endpoint to attach to a running container, and returns the address. Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) + // Wait for a Container + WaitForContainer(containerID string) error } // PodSandboxManager contains methods for operating on PodSandboxes. The methods diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index 32dbc745b304a..5d1ea11ddf655 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -239,6 +239,7 @@ func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod State: SandboxToContainerState(sandbox.State), }) } + runningPod.Statuses = podStatus return runningPod } diff --git a/pkg/kubelet/container/ref.go b/pkg/kubelet/container/ref.go index f61c0fc4a3b45..b256110c7e451 100644 --- a/pkg/kubelet/container/ref.go +++ b/pkg/kubelet/container/ref.go @@ -69,5 +69,15 @@ func fieldPath(pod *v1.Pod, container *v1.Container) (string, error) { } } } + for i := range pod.Spec.DeferContainers { + here := &pod.Spec.DeferContainers[i] + if here.Name == container.Name { + if here.Name == "" { + return fmt.Sprintf("spec.deferContainers[%d]", i), nil + } else { + return fmt.Sprintf("spec.deferContainers{%s}", here.Name), nil + } + } + } return "", fmt.Errorf("container %#v not found in pod %#v", container, pod) } diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go index 2f5f4e3d188a5..3478f15b806cb 100644 --- a/pkg/kubelet/container/runtime.go +++ b/pkg/kubelet/container/runtime.go @@ -93,7 +93,7 @@ type Runtime interface { // gracePeriodOverride if specified allows the caller to override the pod default grace period. // only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data. // it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios. - KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error + KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64, pullSecrets []v1.Secret) error // GetPodStatus retrieves the status of the pod, including the // information of all containers in the pod that are visble in Runtime. GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) @@ -186,6 +186,8 @@ type Pod struct { // components. This is only populated by kuberuntime. // TODO: use the runtimeApi.PodSandbox type directly. 
Sandboxes []*Container + // A reference of the PodStatus to be used + Statuses *PodStatus } // PodPair contains both runtime#Pod and api#Pod diff --git a/pkg/kubelet/container/sync_result.go b/pkg/kubelet/container/sync_result.go index 0d45633030795..a8a1001df745a 100644 --- a/pkg/kubelet/container/sync_result.go +++ b/pkg/kubelet/container/sync_result.go @@ -35,13 +35,14 @@ var ( ) var ( - ErrRunContainer = errors.New("RunContainerError") - ErrKillContainer = errors.New("KillContainerError") - ErrVerifyNonRoot = errors.New("VerifyNonRootError") - ErrRunInitContainer = errors.New("RunInitContainerError") - ErrCreatePodSandbox = errors.New("CreatePodSandboxError") - ErrConfigPodSandbox = errors.New("ConfigPodSandboxError") - ErrKillPodSandbox = errors.New("KillPodSandboxError") + ErrRunContainer = errors.New("RunContainerError") + ErrKillContainer = errors.New("KillContainerError") + ErrVerifyNonRoot = errors.New("VerifyNonRootError") + ErrRunInitContainer = errors.New("RunInitContainerError") + ErrCreatePodSandbox = errors.New("CreatePodSandboxError") + ErrConfigPodSandbox = errors.New("ConfigPodSandboxError") + ErrKillPodSandbox = errors.New("KillPodSandboxError") + ErrRunDeferContainer = errors.New("RunDeferContainerError") ) var ( @@ -62,6 +63,7 @@ const ( CreatePodSandbox SyncAction = "CreatePodSandbox" ConfigPodSandbox SyncAction = "ConfigPodSandbox" KillPodSandbox SyncAction = "KillPodSandbox" + DeferContainer SyncAction = "DeferContainer" ) // SyncResult is the result of sync action. diff --git a/pkg/kubelet/container/testing/fake_runtime.go b/pkg/kubelet/container/testing/fake_runtime.go index 3019d30094e90..7196b6a35fe29 100644 --- a/pkg/kubelet/container/testing/fake_runtime.go +++ b/pkg/kubelet/container/testing/fake_runtime.go @@ -256,7 +256,7 @@ func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ v1.PodStatus, _ *PodStatus, _ []v1. return } -func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error { +func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64, _ []v1.Secret) error { f.Lock() defer f.Unlock() diff --git a/pkg/kubelet/container/testing/runtime_mock.go b/pkg/kubelet/container/testing/runtime_mock.go index 7adbccae3ca89..e5f09c89d2248 100644 --- a/pkg/kubelet/container/testing/runtime_mock.go +++ b/pkg/kubelet/container/testing/runtime_mock.go @@ -70,7 +70,7 @@ func (r *Mock) SyncPod(pod *v1.Pod, apiStatus v1.PodStatus, status *PodStatus, s return args.Get(0).(PodSyncResult) } -func (r *Mock) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error { +func (r *Mock) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64, secrets []v1.Secret) error { args := r.Called(pod, runningPod, gracePeriodOverride) return args.Error(0) } diff --git a/pkg/kubelet/dockershim/docker_container.go b/pkg/kubelet/dockershim/docker_container.go index 453c18c75663e..a7fe61ccd0362 100644 --- a/pkg/kubelet/dockershim/docker_container.go +++ b/pkg/kubelet/dockershim/docker_container.go @@ -250,6 +250,15 @@ func (ds *dockerService) StartContainer(containerID string) error { return nil } +// Wait for a Container +func (ds *dockerService) WaitForContainer(containerID string) error { + err := ds.client.WaitForContainer(containerID) + if err != nil { + return fmt.Errorf("failed to wait for the container %q: %v", containerID, err) + } + return nil +} + // StopContainer stops a running container with a grace period (i.e., timeout). 
func (ds *dockerService) StopContainer(containerID string, timeout int64) error { return ds.client.StopContainer(containerID, time.Duration(timeout)*time.Second) diff --git a/pkg/kubelet/dockershim/libdocker/client.go b/pkg/kubelet/dockershim/libdocker/client.go index 0400bbb917908..a97c652aa09bb 100644 --- a/pkg/kubelet/dockershim/libdocker/client.go +++ b/pkg/kubelet/dockershim/libdocker/client.go @@ -71,6 +71,7 @@ type Interface interface { ResizeContainerTTY(id string, height, width uint) error ResizeExecTTY(id string, height, width uint) error GetContainerStats(id string) (*dockertypes.StatsJSON, error) + WaitForContainer(id string) error } // Get a *dockerapi.Client, either using the endpoint passed in, or using diff --git a/pkg/kubelet/dockershim/libdocker/fake_client.go b/pkg/kubelet/dockershim/libdocker/fake_client.go index 529c9141991ea..c681225dddc83 100644 --- a/pkg/kubelet/dockershim/libdocker/fake_client.go +++ b/pkg/kubelet/dockershim/libdocker/fake_client.go @@ -579,6 +579,48 @@ func (f *FakeDockerClient) StartContainer(id string) error { return nil } +// WaitForContainer is a test-spy implementation of Interface.WaitForContainer. +// It is similar to StopContainer, as Wait is blocking and returns once the container has finished execution. +func (f *FakeDockerClient) WaitForContainer(id string) error { + f.Lock() + defer f.Unlock() + f.appendCalled(calledDetail{name: "wait"}) + if err := f.popError("wait"); err != nil { + return err + } + f.appendContainerTrace("Waiting", id) + // Container status should be updated before the container is moved to ExitedContainerList + f.updateContainerStatus(id, StatusExitedPrefix) + var newList []dockertypes.Container + for _, container := range f.RunningContainerList { + if container.ID == id { + // The newest exited container should be in front, because we assume so in GetPodStatus() + f.ExitedContainerList = append([]dockertypes.Container{container}, f.ExitedContainerList...) + continue + } + newList = append(newList, container) + } + + f.RunningContainerList = newList + container, ok := f.ContainerMap[id] + if !ok { + container = convertFakeContainer(&FakeContainer{ + ID: id, + Name: id, + Running: false, + StartedAt: time.Now().Add(-time.Second), + FinishedAt: time.Now(), + }) + } else { + container.State.FinishedAt = dockerTimestampToString(f.Clock.Now()) + container.State.Running = false + } + + f.ContainerMap[id] = container + f.normalSleep(200, 50, 50) + return nil +} + // StopContainer is a test-spy implementation of Interface.StopContainer. // It adds an entry "stop" to the internal method call record. 
func (f *FakeDockerClient) StopContainer(id string, timeout time.Duration) error { @@ -600,6 +642,7 @@ func (f *FakeDockerClient) StopContainer(id string, timeout time.Duration) error } newList = append(newList, container) } + f.RunningContainerList = newList container, ok := f.ContainerMap[id] if !ok { @@ -614,6 +657,7 @@ func (f *FakeDockerClient) StopContainer(id string, timeout time.Duration) error container.State.FinishedAt = dockerTimestampToString(f.Clock.Now()) container.State.Running = false } + f.ContainerMap[id] = container f.normalSleep(200, 50, 50) return nil diff --git a/pkg/kubelet/dockershim/libdocker/instrumented_client.go b/pkg/kubelet/dockershim/libdocker/instrumented_client.go index 78a0d69648124..f8832977db6bb 100644 --- a/pkg/kubelet/dockershim/libdocker/instrumented_client.go +++ b/pkg/kubelet/dockershim/libdocker/instrumented_client.go @@ -101,6 +101,15 @@ func (in instrumentedInterface) StartContainer(id string) error { return err } +func (in instrumentedInterface) WaitForContainer(id string) error { + const operation = "waitFor_container" + defer recordOperation(operation, time.Now()) + + err := in.client.WaitForContainer(id) + recordError(operation, err) + return err +} + func (in instrumentedInterface) StopContainer(id string, timeout time.Duration) error { const operation = "stop_container" defer recordOperation(operation, time.Now()) diff --git a/pkg/kubelet/dockershim/libdocker/kube_docker_client.go b/pkg/kubelet/dockershim/libdocker/kube_docker_client.go index 0cc0c3ae03975..c081c660fbb77 100644 --- a/pkg/kubelet/dockershim/libdocker/kube_docker_client.go +++ b/pkg/kubelet/dockershim/libdocker/kube_docker_client.go @@ -261,6 +261,23 @@ func (d *kubeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dock return images, nil } +func (d *kubeDockerClient) WaitForContainer(id string) error { + ctx, cancel := d.getTimeoutContext() + defer cancel() + waitCh, errCh := d.client.ContainerWait(ctx, id, dockercontainer.WaitConditionNextExit) + if ctxErr := contextError(ctx); ctxErr != nil { + return ctxErr + } + // Wait either for the container to finish its execution or for an error from the wait call. + select { + case <-waitCh: + break + case err := <-errCh: + return err + } + return nil +} + func base64EncodeAuth(auth dockertypes.AuthConfig) (string, error) { var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(auth); err != nil { diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index fab8dcefbb676..66532c803f75d 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -1439,6 +1439,10 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { podStatus := o.podStatus updateType := o.updateType + // Fetch the pull secrets for the pod. Fetch them a little earlier than usual because we may need to + // supply them to killPod. + pullSecrets := kl.getPullSecretsForPod(pod) + // if we want to kill a pod, do it now! 
if updateType == kubetypes.SyncPodKill { killPodOptions := o.killPodOptions @@ -1448,7 +1452,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { apiPodStatus := killPodOptions.PodStatusFunc(pod, podStatus) kl.statusManager.SetPodStatus(pod, apiPodStatus) // we kill the pod with the specified grace period since this is a termination - if err := kl.killPod(pod, nil, podStatus, killPodOptions.PodTerminationGracePeriodSecondsOverride); err != nil { + if err := kl.killPod(pod, nil, podStatus, killPodOptions.PodTerminationGracePeriodSecondsOverride, pullSecrets); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) // there was an error killing the pod, so we return that error directly utilruntime.HandleError(err) @@ -1515,7 +1519,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // Kill pod if it should not be running if !runnable.Admit || pod.DeletionTimestamp != nil || apiPodStatus.Phase == v1.PodFailed { var syncErr error - if err := kl.killPod(pod, nil, podStatus, nil); err != nil { + if err := kl.killPod(pod, nil, podStatus, nil, pullSecrets); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) syncErr = fmt.Errorf("error killing pod: %v", err) utilruntime.HandleError(syncErr) @@ -1557,7 +1561,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // exists or the pod is running for the first time podKilled := false if !pcm.Exists(pod) && !firstSync { - if err := kl.killPod(pod, nil, podStatus, nil); err == nil { + if err := kl.killPod(pod, nil, podStatus, nil, pullSecrets); err == nil { podKilled = true } } @@ -1627,9 +1631,6 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { } } - // Fetch the pull secrets for the pod - pullSecrets := kl.getPullSecretsForPod(pod) - // Call the container runtime's SyncPod callback result := kl.containerRuntime.SyncPod(pod, apiPodStatus, podStatus, pullSecrets, kl.backOff) kl.reasonCache.Update(pod.UID, result) diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 5e365aa54a8e6..6d537f69c79f1 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -809,7 +809,7 @@ func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, co // One of the following arguments must be non-nil: runningPod, status. // TODO: Modify containerRuntime.KillPod() to accept the right arguments. 
-func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error { +func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64, pullSecrets []v1.Secret) error { var p kubecontainer.Pod if runningPod != nil { p = *runningPod @@ -820,7 +820,7 @@ func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *k } // Call the container runtime KillPod method which stops all running containers of the pod - if err := kl.containerRuntime.KillPod(pod, p, gracePeriodOverride); err != nil { + if err := kl.containerRuntime.KillPod(pod, p, gracePeriodOverride, pullSecrets); err != nil { return err } if err := kl.containerManager.UpdateQOSCgroups(); err != nil { @@ -1095,8 +1095,9 @@ func (kl *Kubelet) podKiller() { if !exists { go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) { + pullSecretes := kl.getPullSecretsForPod(apiPod) glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name) - err := kl.killPod(apiPod, runningPod, nil, nil) + err := kl.killPod(apiPod, runningPod, nil, nil, pullSecretes) if err != nil { glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err) } @@ -1422,6 +1423,13 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine len(pod.Spec.InitContainers) > 0, true, ) + apiPodStatus.DeferContainerStatuses = kl.convertToAPIContainerStatuses( + pod, podStatus, + pod.Status.DeferContainerStatuses, + pod.Spec.DeferContainers, + len(pod.Spec.DeferContainers) > 0, + true, + ) return &apiPodStatus } diff --git a/pkg/kubelet/kuberuntime/instrumented_services.go b/pkg/kubelet/kuberuntime/instrumented_services.go index a408f67e0ec0c..8259b2458ab01 100644 --- a/pkg/kubelet/kuberuntime/instrumented_services.go +++ b/pkg/kubelet/kuberuntime/instrumented_services.go @@ -95,6 +95,15 @@ func (in instrumentedRuntimeService) StartContainer(containerID string) error { return err } +func (in instrumentedRuntimeService) WaitForContainer(containerID string) error { + const operation = "waitFor_container" + defer recordOperation(operation, time.Now()) + + err := in.service.WaitForContainer(containerID) + recordError(operation, err) + return err +} + func (in instrumentedRuntimeService) StopContainer(containerID string, timeout int64) error { const operation = "stop_container" defer recordOperation(operation, time.Now()) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 1b622b2ca66d3..abd3957f566f9 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -79,6 +79,23 @@ func (m *kubeGenericRuntimeManager) recordContainerEvent(pod *v1.Pod, container m.recorder.Event(ref, eventType, reason, eventMessage) } +//startContainerAndWait starts a container and waits until the container completes its execution. +//This is a blocking call, the calling goroutine will be blocked. until the container is created and finishes execution. 
+func (m *kubeGenericRuntimeManager) startContainerAndWait(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) { + + containerID, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP) + if err != nil { + return containerID, err + } + + err = m.runtimeService.WaitForContainer(containerID) + if err != nil { + return containerID, err + } + return containerID, nil + +} + // startContainer starts a container and returns a message indicates why it is failed on error. // It starts the container through the following steps: // * pull the image @@ -169,7 +186,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb } } - return "", nil + return containerID, nil } // generateContainerConfig generates container config for kubelet runtime v1. @@ -550,11 +567,43 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID return pod, container, nil } +func calculateGracePeriod(pod *v1.Pod) int64 { + + gracePeriod := int64(minimumGracePeriodInSeconds) + switch { + case pod.DeletionGracePeriodSeconds != nil: + gracePeriod = *pod.DeletionGracePeriodSeconds + case pod.Spec.TerminationGracePeriodSeconds != nil: + gracePeriod = *pod.Spec.TerminationGracePeriodSeconds + } + + return gracePeriod +} + +func updateGracePeriod(pod *v1.Pod, gracePeriod int64) { + if pod.DeletionGracePeriodSeconds == nil && pod.Spec.TerminationGracePeriodSeconds == nil { + pod.Spec.TerminationGracePeriodSeconds = &gracePeriod + return + } + + //Otherwise update the one that is configured + switch { + case pod.DeletionGracePeriodSeconds != nil: + *pod.DeletionGracePeriodSeconds = gracePeriod + case pod.Spec.TerminationGracePeriodSeconds != nil: + *pod.Spec.TerminationGracePeriodSeconds = gracePeriod + } + +} + // killContainer kills a container through the following steps: // * Run the pre-stop lifecycle hooks (if applicable). // * Stop the container. +// * If DeferContainers were configured PreStopHook will be skipped. func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, reason string, gracePeriodOverride *int64) error { + var containerSpec *v1.Container + if pod != nil { if containerSpec = kubecontainer.GetContainerSpec(pod, containerName); containerSpec == nil { return fmt.Errorf("failed to get containerSpec %q(id=%q) in pod %q when killing container for reason %q", @@ -569,13 +618,13 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec pod, containerSpec = restoredPod, restoredContainer } - // From this point , pod and container must be non-nil. 
- gracePeriod := int64(minimumGracePeriodInSeconds)
- switch {
- case pod.DeletionGracePeriodSeconds != nil:
- gracePeriod = *pod.DeletionGracePeriodSeconds
- case pod.Spec.TerminationGracePeriodSeconds != nil:
- gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
+ // From this point on, pod and container must be non-nil.
+ gracePeriod := calculateGracePeriod(pod)
+
+ if len(pod.Spec.DeferContainers) > 0 {
+ // If defer containers are configured, do not run the PreStop hooks.
+ // Also set gracePeriod to 0 so that the running containers are killed immediately.
+ gracePeriod = 0
}
glog.V(2).Infof("Killing container %q with %d second grace period", containerID.String(), gracePeriod)
@@ -589,10 +638,12 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
if containerSpec.Lifecycle != nil && containerSpec.Lifecycle.PreStop != nil && gracePeriod > 0 {
gracePeriod = gracePeriod - m.executePreStopHook(pod, containerID, containerSpec, gracePeriod)
}
+
// always give containers a minimal shutdown window to avoid unnecessary SIGKILLs
if gracePeriod < minimumGracePeriodInSeconds {
gracePeriod = minimumGracePeriodInSeconds
}
+
if gracePeriodOverride != nil {
gracePeriod = *gracePeriodOverride
glog.V(3).Infof("Killing container %q, but using %d second grace period override", containerID, gracePeriod)
@@ -615,11 +666,126 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
return err
}
+// runWaitandRetryContainers runs a defer container: if the container is already running it will
+// attach to it, and if the container fails it is re-run until it succeeds.
+func (m *kubeGenericRuntimeManager) runWaitandRetryContainers(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, attach bool, cStatus *kubecontainer.ContainerStatus) (*runtimeapi.ContainerStatus, error) {
+
+ var cID string
+ var err error
+ var resultStatus *runtimeapi.ContainerStatus
+
+ for {
+ if attach {
+ cID = cStatus.ID.ID
+ // The container is already running, just attach to it for now.
+ err = m.runtimeService.WaitForContainer(cID)
+ // Reset the flag in case we re-run the container in the loop.
+ attach = false
+ } else {
+ cID, err = m.startContainerAndWait(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP)
+ }
+ resultStatus, _ = m.runtimeService.ContainerStatus(cID)
+ if err == nil {
+ // The container finished its execution successfully.
+ break
+ }
+ // TODO: Log an error or send an event that the container failed and we are retrying.
+ }
+
+ return resultStatus, nil
+}
+
+// runDeferContainers runs the pod's defer containers one after the other.
+func (m *kubeGenericRuntimeManager) runDeferContainers(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64, pullSecrets []v1.Secret) ([]*kubecontainer.SyncResult, int64) {
+
+ var syncResults []*kubecontainer.SyncResult
+ done := make(chan bool)
+
+ gracePeriod := calculateGracePeriod(pod)
+
+ statuses := runningPod.Statuses
+ podIP := statuses.IP
+
+ if len(statuses.ContainerStatuses) <= 0 {
+ // We need a valid sandbox at this point.
+ // TODO: Log or return an error
+ return syncResults, gracePeriod
+ }
+
+ // Get the latest sandbox info.
+ attempt := statuses.SandboxStatuses[0].Metadata.Attempt
+ sandboxID := statuses.SandboxStatuses[0].Id
+ sandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
+ if err != nil {
+ return syncResults, gracePeriod
+ }
+
+ for _, container := range pod.Spec.DeferContainers {
+ // Check whether the container already has a recorded status.
+ cStatus := statuses.FindContainerStatusByName(container.Name)
+ if cStatus != nil {
+ // If the container is running, attach to it.
+ switch cStatus.State {
+ case kubecontainer.ContainerStateRunning:
+ // Wait for the container.
+ go func() {
+ m.runWaitandRetryContainers(sandboxID, sandboxConfig, &container, pod, statuses, pullSecrets, podIP, true, cStatus)
+ done <- true
+ }()
+ case kubecontainer.ContainerStateExited:
+ if isContainerFailed(cStatus) {
+ // Restart the container if it had failed.
+ go func() {
+ m.runWaitandRetryContainers(sandboxID, sandboxConfig, &container, pod, statuses, pullSecrets, podIP, false, nil)
+ done <- true
+ }()
+
+ } else {
+ // This container finished execution, go to the next one.
+ continue
+ }
+ }
+ } else {
+ // If the container is not running, create it and attach to it.
+ go func() {
+ m.runWaitandRetryContainers(sandboxID, sandboxConfig, &container, pod, statuses, pullSecrets, podIP, false, nil)
+ done <- true
+ }()
+ }
+
+ startTime := metav1.Now()
+ select {
+
+ case <-time.After(time.Duration(gracePeriod) * time.Second):
+ glog.V(3).Infof("deferContainer: Exhausted gracePeriod")
+ // We exhausted the gracePeriod, proceed to kill.
+ break
+ case <-done:
+ gracePeriod = gracePeriod - int64(metav1.Now().Sub(startTime.Time).Seconds())
+ syncResult := kubecontainer.NewSyncResult(kubecontainer.DeferContainer, container.Name)
+ syncResults = append(syncResults, syncResult)
+ glog.V(3).Infof("deferContainer: Container %s finished execution", container.Name)
+
+ }
+ }
+
+ // Update the grace period to 0 to indicate that the pod should be deleted immediately.
+ updateGracePeriod(pod, 0)
+ return syncResults, gracePeriod
+}
+
// killContainersWithSyncResult kills all pod's containers with sync results.
-func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) {
+func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64, pullSecrets []v1.Secret) (syncResults []*kubecontainer.SyncResult) {
+
+ // If deferContainers are configured, run them first, but only if the pod is in the Running phase.
+ if len(pod.Spec.DeferContainers) > 0 && pod.Status.Phase == v1.PodRunning {
+ deferContainerResults, _ := m.runDeferContainers(pod, runningPod, gracePeriodOverride, pullSecrets)
+ syncResults = append(syncResults, deferContainerResults...)
+ }
+
containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers))
- wg := sync.WaitGroup{}
+ wg := sync.WaitGroup{}
wg.Add(len(runningPod.Containers))
for _, container := range runningPod.Containers {
go func(container *kubecontainer.Container) {
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go
index 06d91b7540ba9..d54735f458c81 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go
@@ -376,6 +376,8 @@ type podActions struct {
// the key is the container ID of the container, while
// the value contains necessary information to kill a container.
ContainersToKill map[kubecontainer.ContainerID]containerToKillInfo
+ // Images of DeferContainers that need to be pre-pulled; this is a map of image name to image ref.
+ DeferContainerImagesToPrePull map[string]string
}
// podSandboxChanged checks whether the spec of the pod is changed and returns
@@ -449,6 +451,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
Attempt: attempt,
ContainersToStart: []int{},
ContainersToKill: make(map[kubecontainer.ContainerID]containerToKillInfo),
+ DeferContainerImagesToPrePull: make(map[string]string),
}
// If we need to (re-)create the pod sandbox, everything will need to be
@@ -554,6 +557,17 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
changes.KillPod = true
}
+ // Check whether we can pull deferContainer images (if they are not already available) while the pod is running.
+ if pod.Spec.DeferContainers != nil {
+ for _, container := range pod.Spec.DeferContainers {
+ if _, ok := changes.DeferContainerImagesToPrePull[container.Image]; !ok {
+ // This is a new image that belongs to a defer container and needs to be pulled.
+ changes.DeferContainerImagesToPrePull[container.Image], _ = m.GetImageRef(kubecontainer.ImageSpec{Image: container.Image})
+ }
+
+ }
+ }
+
return changes
}
@@ -565,6 +579,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
// 4. Create sandbox if necessary.
// 5. Create init containers.
// 6. Create normal containers.
+// 7. Pull DeferContainer images.
func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
// Step 1: Compute sandbox and container changes.
podContainerChanges := m.computePodActions(pod, podStatus)
@@ -589,7 +604,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
glog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod))
}
- killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
+ killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil, pullSecrets)
result.AddPodSyncResult(killResult)
if killResult.Error() != nil {
glog.Errorf("killPodWithSyncResult failed: %v", killResult.Error())
@@ -737,6 +752,24 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
}
}
+ // Step 7: If the pod is in the Running phase, pre-pull deferContainer images.
+ if pod.Status.Phase == v1.PodRunning && len(podContainerChanges.DeferContainerImagesToPrePull) > 0 {
+ for idx, container := range pod.Spec.DeferContainers {
+ if imageRef, ok := podContainerChanges.DeferContainerImagesToPrePull[container.Image]; ok && imageRef == "" {
+ // This is a valid entry but the image has not been pulled yet.
+ ref, referr := kubecontainer.GenerateContainerRef(pod, &pod.Spec.DeferContainers[idx])
+ if referr == nil {
+ m.recorder.Eventf(ref, v1.EventTypeNormal, events.PullingImage, "Defer-Container image is being pre-pulled")
+ }
+ imageRef, message, err := m.imagePuller.EnsureImageExists(pod, &pod.Spec.DeferContainers[idx], pullSecrets)
+ if err != nil {
+ glog.V(4).Infof("Unable to pull the image err=%v message=%v deferContainers(%+v) pod(%v)", err, message, container, format.Pod(pod))
+ continue
+ }
+ podContainerChanges.DeferContainerImagesToPrePull[container.Image] = imageRef
+ }
+ }
+ }
return
}
@@ -777,15 +810,15 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
// gracePeriodOverride if specified allows the caller to override the pod default grace period.
// only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
// it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
-func (m *kubeGenericRuntimeManager) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
- err := m.killPodWithSyncResult(pod, runningPod, gracePeriodOverride)
+func (m *kubeGenericRuntimeManager) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64, pullSecrets []v1.Secret) error {
+ err := m.killPodWithSyncResult(pod, runningPod, gracePeriodOverride, pullSecrets)
return err.Error()
}
// killPodWithSyncResult kills a runningPod and returns SyncResult.
// Note: The pod passed in could be *nil* when kubelet restarted.
-func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) { - killContainerResults := m.killContainersWithSyncResult(pod, runningPod, gracePeriodOverride) +func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64, pullSecrets []v1.Secret) (result kubecontainer.PodSyncResult) { + killContainerResults := m.killContainersWithSyncResult(pod, runningPod, gracePeriodOverride, pullSecrets) for _, containerResult := range killContainerResults { result.AddSyncResult(containerResult) } diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go index 6a8a33353f553..54b9fd141fa74 100644 --- a/pkg/kubelet/pod_workers.go +++ b/pkg/kubelet/pod_workers.go @@ -152,6 +152,7 @@ func newPodWorkers(syncPodFn syncPodFnType, recorder record.EventRecorder, workQ func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) { var lastSyncTime time.Time + for update := range podUpdates { err := func() error { podUID := update.Pod.UID diff --git a/pkg/kubelet/remote/remote_runtime.go b/pkg/kubelet/remote/remote_runtime.go index 3560e3c9b2979..120920b4942da 100644 --- a/pkg/kubelet/remote/remote_runtime.go +++ b/pkg/kubelet/remote/remote_runtime.go @@ -213,6 +213,12 @@ func (r *RemoteRuntimeService) StartContainer(containerID string) error { return nil } +// WaitForContainer Waits until a container terminates. Just a dummy implementation +func (r *RemoteRuntimeService) WaitForContainer(containerID string) error { + + return nil +} + // StopContainer stops a running container with a grace period (i.e., timeout). func (r *RemoteRuntimeService) StopContainer(containerID string, timeout int64) error { // Use timeout + default timeout (2 minutes) as timeout to leave extra time diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go index b35288b4aab81..479899798fcf9 100644 --- a/pkg/kubelet/rkt/rkt.go +++ b/pkg/kubelet/rkt/rkt.go @@ -1457,7 +1457,7 @@ func (r *Runtime) RunPod(pod *v1.Pod, pullSecrets []v1.Secret) error { // This is a temporary solution until we have a clean design on how // kubelet handles events. See https://github.com/kubernetes/kubernetes/issues/23084. if err := r.runLifecycleHooks(pod, runtimePod, lifecyclePostStartHook); err != nil { - if errKill := r.KillPod(pod, *runtimePod, nil); errKill != nil { + if errKill := r.KillPod(pod, *runtimePod, nil, pullSecrets); errKill != nil { return errors.NewAggregate([]error{err, errKill}) } r.cleanupPodNetwork(pod, networkNamespace) @@ -1733,7 +1733,7 @@ func (r *Runtime) waitPreStopHooks(pod *v1.Pod, runningPod *kubecontainer.Pod) { // KillPod invokes 'systemctl kill' to kill the unit that runs the pod. // TODO: add support for gracePeriodOverride which is used in eviction scenarios -func (r *Runtime) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { +func (r *Runtime) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64, pullSecrets []v1.Secret) error { glog.V(4).Infof("Rkt is killing pod: name %q.", runningPod.Name) if len(runningPod.Containers) == 0 { @@ -1863,7 +1863,7 @@ func (r *Runtime) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer. if restartPod { // Kill the pod only if the pod is actually running. 
if len(runningPod.Containers) > 0 {
- if err = r.KillPod(pod, runningPod, nil); err != nil {
+ if err = r.KillPod(pod, runningPod, nil, pullSecrets); err != nil {
return
}
}
diff --git a/pkg/kubelet/rktshim/app-interface.go b/pkg/kubelet/rktshim/app-interface.go
index be01bac97fe71..c15534d9dc4c3 100644
--- a/pkg/kubelet/rktshim/app-interface.go
+++ b/pkg/kubelet/rktshim/app-interface.go
@@ -45,6 +45,11 @@ func (*Runtime) StartContainer(string) error {
panic("not implemented")
}
+// WaitForContainer waits for a created app to finish execution.
+func (*Runtime) WaitForContainer(string) error {
+ panic("not implemented")
+}
+
// StopContainer stops a running app with a grace period (i.e. timeout).
func (*Runtime) StopContainer(string, int64) error {
panic("not implemented")
diff --git a/pkg/kubelet/rktshim/fake-app-interface.go b/pkg/kubelet/rktshim/fake-app-interface.go
index b0e20d55535cd..4da99c70d59c4 100644
--- a/pkg/kubelet/rktshim/fake-app-interface.go
+++ b/pkg/kubelet/rktshim/fake-app-interface.go
@@ -148,6 +148,27 @@ func (r *FakeRuntime) StartContainer(id string) error {
return nil
}
+func (r *FakeRuntime) WaitForContainer(id string) error {
+ c, ok := r.Containers[id]
+ if !ok {
+ return ErrContainerNotFound
+ }
+ switch c.State {
+ case runtimeapi.ContainerState_CONTAINER_EXITED:
+ fallthrough
+ case runtimeapi.ContainerState_CONTAINER_CREATED:
+ c.Start()
+ case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
+ // TODO(tmrts): add timeout to Start API or generalize timeout somehow
+ //<-time.After(time.Duration(timeout) * time.Second)
+ fallthrough
+ default:
+ return ErrInvalidContainerStateTransition
+ }
+
+ return nil
+}
+
func (r *FakeRuntime) StopContainer(id string, timeout int64) error {
c, ok := r.Containers[id]
if !ok {
diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go
index ce44f0ed35300..f6b25671c7c7d 100644
--- a/staging/src/k8s.io/api/core/v1/types.go
+++ b/staging/src/k8s.io/api/core/v1/types.go
@@ -2714,6 +2714,16 @@ const (
TolerationOpEqual TolerationOperator = "Equal"
)
+
+const (
+ // This annotation key will contain an array of JSON-encoded containers for deferContainers.
+ // This key is only recognized by versions >= 1.7.
+ PodDeferContainersAnnotationKey = "pod.alpha.kubernetes.io/defer-containers"
+ // This annotation key will contain an array of JSON-encoded container statuses for deferContainers.
+ // This key is only recognized by versions >= 1.7.
+ PodDeferContainersStatusesKey = "pod.alpha.kubernetes.io/defer-container-statuses"
+)
+
// PodSpec is a description of a pod.
type PodSpec struct {
// List of volumes that can be mounted by containers belonging to the pod.
@@ -2745,6 +2755,10 @@ type PodSpec struct {
// +patchMergeKey=name
// +patchStrategy=merge
Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
+ // List of termination containers; these will be executed during the TerminationGracePeriod of the pod.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ DeferContainers []Container `json:"deferContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,26,rep,name=deferContainers"`
// Restart policy for all containers within the pod.
// One of Always, OnFailure, Never.
// Default to Always.
@@ -3003,12 +3017,14 @@ type PodStatus struct {
// startTime set.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
-
// The list has one entry per container in the manifest. Each entry is currently the output
// of `docker inspect`.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// +optional
ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
+ // This list has one entry per defer container in the manifest. These containers will not be run until the pod is being
+ // terminated; until then they are kept ready but in the waiting state, and each entry is updated with a start time as and when the container is started.
+ DeferContainerStatuses []ContainerStatus `json:"deferContainerStatuses,omitempty" protobuf:"bytes,11,rep,name=deferContainerStatuses"`
// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
// See PodQOSClass type for available QOS classes
// More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md
diff --git a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go
index b81d8eff89139..d1d38836d5a29 100644
--- a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go
+++ b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -3831,6 +3831,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.DeferContainers != nil {
+ in, out := &in.DeferContainers, &out.DeferContainers
+ *out = make([]Container, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
if *in == nil {
@@ -3966,6 +3973,13 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.DeferContainerStatuses != nil {
+ in, out := &in.DeferContainerStatuses, &out.DeferContainerStatuses
+ *out = make([]ContainerStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
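
For reference, a minimal sketch of how a pod might opt into the new field. The pod name, namespace, image, and command are hypothetical; only the DeferContainers field and its termination-time behavior come from the change above.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	grace := int64(60)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "app", Image: "nginx:1.13"},
			},
			// Defer containers run only while the pod is being terminated. When they are
			// configured, PreStop hooks are skipped and the remaining grace period is spent
			// running them to completion before the regular containers are killed.
			DeferContainers: []v1.Container{
				{Name: "drain", Image: "busybox", Command: []string{"sh", "-c", "echo draining"}},
			},
			TerminationGracePeriodSeconds: &grace,
		},
	}
	fmt.Printf("pod %s has %d defer container(s)\n", pod.Name, len(pod.Spec.DeferContainers))
}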
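
For clients that only understand the alpha annotations, the defer containers could be mirrored as a JSON array under PodDeferContainersAnnotationKey, as the comment on that constant describes. The helper below is a hypothetical illustration of that encoding and is not part of this change.

package main

import (
	"encoding/json"

	v1 "k8s.io/api/core/v1"
)

// setDeferContainersAnnotation copies the typed DeferContainers field into the
// alpha annotation as a JSON array, for consumers that predate the field.
func setDeferContainersAnnotation(pod *v1.Pod) error {
	data, err := json.Marshal(pod.Spec.DeferContainers)
	if err != nil {
		return err
	}
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	pod.Annotations[v1.PodDeferContainersAnnotationKey] = string(data)
	return nil
}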
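
Finally, a small table-driven sketch of the precedence implemented by the new calculateGracePeriod helper: DeletionGracePeriodSeconds wins over TerminationGracePeriodSeconds, and minimumGracePeriodInSeconds is the fallback when neither is set. This is illustrative only; it assumes the test sits in the kuberuntime package next to the helper, and the import paths may need to match the v1 package vendored at this revision.

package kuberuntime

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestCalculateGracePeriodPrecedence(t *testing.T) {
	deletion, termination := int64(45), int64(30)
	cases := []struct {
		name string
		pod  *v1.Pod
		want int64
	}{
		// Neither field is set, so the kubelet minimum applies.
		{"neither set", &v1.Pod{}, minimumGracePeriodInSeconds},
		// Only the spec value is set.
		{"termination only", &v1.Pod{Spec: v1.PodSpec{TerminationGracePeriodSeconds: &termination}}, termination},
		// DeletionGracePeriodSeconds takes precedence over the spec value.
		{"deletion wins", &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{DeletionGracePeriodSeconds: &deletion},
			Spec:       v1.PodSpec{TerminationGracePeriodSeconds: &termination},
		}, deletion},
	}
	for _, tc := range cases {
		if got := calculateGracePeriod(tc.pod); got != tc.want {
			t.Errorf("%s: calculateGracePeriod() = %d, want %d", tc.name, got, tc.want)
		}
	}
}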