This repository has been archived by the owner on Jun 13, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 104
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Move wait logic out of raw and into common, and use that logic in scale. Fix a few broken wait condition cases highlighted by scaling up and down. Move scale-related tests into a dedicated test task file. Add additional service-related tests.
- Loading branch information
1 parent
3bdfb47
commit beebe98
Showing
7 changed files
with
487 additions
and
171 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,214 @@ | ||
---
# Integration tests for the k8s_scale module: create a Deployment, scale it
# down to zero, reapply it, scale it up, verify a no-op scale, and scale down
# without waiting. The `always:` section removes the test namespace so the
# cluster is left clean even when an assertion fails.
- block:
    - set_fact:
        scale_namespace: scale

    - name: Ensure namespace exists
      k8s:
        definition:
          apiVersion: v1
          kind: Namespace
          metadata:
            name: "{{ scale_namespace }}"

    - name: Add a deployment
      k8s:
        definition:
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: scale-deploy
            namespace: "{{ scale_namespace }}"
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            # k8s_pod_template is provided by the surrounding test role's vars
            # and consumes k8s_pod_name/k8s_pod_image/k8s_pod_ports below.
            template: "{{ k8s_pod_template }}"
        wait: yes
        wait_timeout: 60
        apply: yes
      vars:
        k8s_pod_name: scale-deploy
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
        k8s_pod_ports:
          - containerPort: 8080
            name: http
            protocol: TCP

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
        field_selectors:
          - status.phase=Running

    - name: Scale the deployment
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 0
        wait: yes
      register: scale_down

    # It looks like the Deployment is updated to have the desired state *before*
    # the pods are terminated. Wait a couple of seconds to allow the pods to at
    # least get to Terminating state.
    - name: Avoid race condition
      pause:
        seconds: 2

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
        field_selectors:
          - status.phase=Running
      register: scale_down_deploy_pods

    - name: Ensure that scale down took effect
      assert:
        that:
          - scale_down is changed
          - '"duration" in scale_down'
          - scale_down.diff
          - scale_down_deploy_pods.resources | length == 0

    - name: Reapply the earlier deployment
      k8s:
        definition:
          # NOTE: inside `definition:` this is a raw Kubernetes manifest, so the
          # key must be camelCase `apiVersion` — snake_case `api_version` is
          # only a module-level parameter and is not valid in a manifest.
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: scale-deploy
            namespace: "{{ scale_namespace }}"
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            template: "{{ k8s_pod_template }}"
        wait: yes
        wait_timeout: 60
        apply: yes
      vars:
        k8s_pod_name: scale-deploy
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
        k8s_pod_ports:
          - containerPort: 8080
            name: http
            protocol: TCP
      register: reapply_after_scale

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
        field_selectors:
          - status.phase=Running
      register: scale_up_deploy_pods

    - name: Ensure that reapply after scale worked
      assert:
        that:
          - reapply_after_scale is changed
          - scale_up_deploy_pods.resources | length == 1

    - name: Scale the deployment up
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 2
        wait: yes
        wait_timeout: 60
      register: scale_up

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        field_selectors:
          - status.phase=Running
        namespace: "{{ scale_namespace }}"
      register: scale_up_further_deploy_pods

    - name: Ensure that scale up worked
      assert:
        that:
          - scale_up is changed
          - '"duration" in scale_up'
          - scale_up.diff
          - scale_up_further_deploy_pods.resources | length == 2

    - name: Don't scale the deployment up
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 2
        wait: yes
      register: scale_up_noop

    - name: Get pods in scale-deploy
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        field_selectors:
          - status.phase=Running
        namespace: "{{ scale_namespace }}"
      register: scale_up_noop_pods

    - name: Ensure that no-op scale up worked
      assert:
        that:
          - scale_up_noop is not changed
          - not scale_up_noop.diff
          - scale_up_noop_pods.resources | length == 2
          - '"duration" in scale_up_noop'

    - name: Scale deployment down without wait
      k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: scale-deploy
        namespace: "{{ scale_namespace }}"
        replicas: 1
        wait: no
      register: scale_down_no_wait

    # Poll until only one pod remains; since the scale task above did not wait,
    # the pod count converges asynchronously.
    - name: Ensure that scale down succeeds
      k8s_info:
        kind: Pod
        label_selectors:
          - app=scale-deploy
        namespace: "{{ scale_namespace }}"
      register: scale_down_no_wait_pods
      retries: 6
      delay: 5
      # `until` must be a bare Jinja2 expression: wrapping it in "{{ ... }}"
      # makes Ansible warn "conditional statements should not include jinja2
      # templating delimiters" and can evaluate as an always-truthy string.
      until: scale_down_no_wait_pods.resources | length == 1

    - name: Ensure that scale down without wait worked
      assert:
        that:
          - scale_down_no_wait is changed
          - scale_down_no_wait.diff
          - scale_down_no_wait_pods.resources | length == 1

  always:
    # Deleting the namespace also deletes the Deployment and its pods.
    - name: Remove namespace
      k8s:
        kind: Namespace
        name: "{{ scale_namespace }}"
        state: absent
Oops, something went wrong.