Including tasks to perform periodic target failure test on cstor volume (openebs#1868)

* Including tasks to perform periodic target failure test on cstor volume

Signed-off-by: gprasath <giridhara.prasad@cloudbyte.com>

* Set variable for operator namespace and modified the task names

Signed-off-by: gprasath <giridhara.prasad@cloudbyte.com>

* Setting different values to variables based on the selected storage engine

Signed-off-by: gprasath <giridhara.prasad@cloudbyte.com>

* Fixing typo in the test steps

Signed-off-by: gprasath <giridhara.prasad@cloudbyte.com>
gprasath authored and dargasudarshan committed Aug 30, 2018
1 parent 4baa1cf commit 53bc599
Showing 1 changed file with 86 additions and 43 deletions.
@@ -1,16 +1,15 @@
# test-ctrl-failure.yml
# Author: Karthik
# Description: Include resiliency test suite in OpenEBS e2e (Test: periodic controller failures)
# Description: Include resiliency test suite in OpenEBS e2e (Test: periodic controller/target pod failures)
###############################################################################################
#Test Steps:
#1. Copy Test artifacts to Kubemaster.
#2. Deploy Percona application with liveness probe running db queries continuously.
#3. Create chaoskube infrastructer to induce failures.
#4. Gather Node,Replica Container and chaoskube pod details required for test.
#5. Initiate periodic controller failures from chaoskube
#6. Check percona application status to veriy openebs replica sustain network failures.
#7. Verify successful Fault Injection via end marker
#8. Perform cleanup of test artifacts.
#3. Create chaoskube infrastructure to induce failures.
#4. Gather Node, Container/target and chaoskube pod details required for test.
#5. Initiate periodic pod failures from chaoskube
#6. Check percona application status to ensure that it is running after the failure.
#7. Perform cleanup of test artifacts.
###############################################################################################
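The steps above drive the fault injection through chaoskube, which deletes a random pod matching a label filter at each interval. Purely as an illustration of what the exec in step 5 boils down to, using the same flags the playbook passes but with placeholder label, namespace, and timing values (the real ones come from the playbook variables):

  # Illustrative sketch only: kill one matching pod every 60s until the
  # outer `timeout` stops chaoskube after 1200s.
  timeout -t 1200 chaoskube \
    --labels 'openebs.io/controller=jiva-controller' \
    --namespaces=app-test-ns \
    --interval=60s \
    --no-dry-run --debug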

- hosts: localhost
@@ -19,6 +18,7 @@
- test-ctrl-failure-vars.yml

tasks:

- block:

- include: test-ctrl-failure-prereq.yml
@@ -29,36 +29,38 @@
# confirm OpenEBS operator is ready for requests. #
###################################################

- name: Get percona spec and liveness scripts
- name: 1) Get percona spec and liveness scripts
get_url:
url: "{{ item }}"
dest: "{{ result_kube_home.stdout }}"
delegate_to: "{{ groups['kubernetes-kubemasters'].0 }}"
with_items: "{{ percona_links }}"

- name: Replace volume-claim name with test parameters
- name: 1b) Replace volume-claim name with test parameters
include_tasks: "{{utils_path}}/regex_task.yml"
vars:
path: "{{ result_kube_home.stdout }}/percona.yaml"
regex1: "{{replace_item}}"
regex2: "{{replace_with}}"

- name: Copy the chaoskube specs to kubemaster
- name: 1c) Copy the chaoskube specs to kubemaster
include_tasks: "{{utils_path}}/copy_task.yml"
vars:
destination_node: "{{groups['kubernetes-kubemasters'].0}}"
files_to_copy: "{{ chaoskube_files }}"

- name: Replace volume-claim name with test parameters
- name: 1d) Replace volume-claim name with test parameters
replace:
path: "{{ result_kube_home.stdout }}/rbac.yaml"
regexp: 'namespace: default'
replace: 'namespace: {{ namespace }}'
delegate_to: "{{ groups['kubernetes-kubemasters'].0 }}"

# Check if mayaapiserver is running

- include_tasks: "{{utils_path}}/deploy_check.yml"
vars:
ns: openebs
ns: "{{ operator_ns }}"
lkey: name
lvalue: maya-apiserver
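The deploy_check.yml include above confirms that maya-apiserver is up before the test proceeds. Done by hand, and assuming the operator namespace is the default openebs, the check amounts to something like:

  # Sketch: list maya-apiserver pods by the same label the include filters on
  # and confirm they report Running.
  kubectl get pods -n openebs -l name=maya-apiserver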

@@ -68,7 +70,7 @@
# deploy percona w/ a liveness check for DB writes)#
####################################################

- name: Create test specific namespace
- name: 2) Create test specific namespace
include_tasks: "{{utils_path}}/namespace_task.yml"
vars:
status: create
@@ -79,26 +81,43 @@
app_yml: "{{ chaoskube_files }}"
ns: "{{ namespace }}"

- name: Check whether chaoskube infrastructure is created
- name: 2a) Check whether chaoskube infrastructure is created
include_tasks: "{{utils_path}}/deploy_check.yml"
vars:
ns: "{{ namespace }}"
lkey: app
lvalue: chaoskube


- name: Set chaoskube pod name to variable
- name: 2b) Set chaoskube pod name to variable
set_fact:
chaospod: "{{ result.stdout_lines[1].split()[0] }}"

- name: Create a configmap with the liveness sql script
- name: 2c) Create a configmap with the liveness sql script
shell: source ~/.profile; kubectl create configmap sqltest --from-file={{result_kube_home.stdout}}/{{ percona_files.1 }} -n {{ namespace }}
args:
executable: /bin/bash
delegate_to: "{{groups['kubernetes-kubemasters'].0}}"
register: result
failed_when: "'configmap' and 'created' not in result.stdout"

- name: 3) Replace storage-class in app yaml to use jiva based storage engine
replace:
path: "{{ result_kube_home.stdout }}/{{ percona_files.0 }}"
regexp: 'openebs-standard'
replace: '{{ jiva_sc }}'
delegate_to: "{{ groups['kubernetes-kubemasters'].0 }}"
when: storage_engine == 'jiva'

- name: 3a) Replace storage-class in app yaml to use cstor based storage engine
replace:
path: "{{ result_kube_home.stdout }}/{{ percona_files.0 }}"
regexp: 'openebs-standard'
replace: '{{ cstor_sc }}'
delegate_to: "{{ groups['kubernetes-kubemasters'].0 }}"
when: storage_engine == 'cStor'
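The two replace tasks above swap the default openebs-standard storage class in the percona spec for the class matching the selected storage engine. A quick manual sanity check, with <selected-sc> standing in for whichever of jiva_sc or cstor_sc was substituted, would be roughly:

  # Sketch: the class should exist and the app spec should now reference it.
  kubectl get sc <selected-sc>
  grep '<selected-sc>' percona.yaml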

# Deploy percona application using the selected storage engine.

- include_tasks: "{{utils_path}}/deploy_task.yml"
vars:
app_yml: "{{ percona_files.0 }}"
@@ -110,20 +129,42 @@
lkey: name
lvalue: percona

- name: 3b) Check if the cStor target has been created and is running.
shell: source ~/.profile; kubectl get pods -n {{ operator_ns }} | grep {{ namespace }}
args:
executable: /bin/bash
register: cstor_pod
until: "'Running' in cstor_pod.stdout"
delay: 30
retries: 15
when: storage_engine == 'cStor'
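The check above greps the operator namespace for a pod whose name contains the test namespace. A tighter filter, assuming the target pod carries the cstor-target label that the playbook uses later for chaos injection, would be:

  # Sketch: wait for the cStor target pod via its label selector instead of grep.
  kubectl get pods -n openebs -l openebs.io/target=cstor-target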

- name: Wait for 120s to ensure liveness check starts
wait_for:
timeout: 120

- name: Get the name of the volume ctrl deployment
- name: Setting values to variables specific to jiva
set_fact:
ns: "{{ namespace }}"
pod_label: "openebs.io/controller=jiva-controller"
when: storage_engine == 'jiva'

- name: Setting values to variables specific to cstor
set_fact:
ns: "{{ operator_ns }}"
pod_label: "openebs.io/target=cstor-target"
when: storage_engine == 'cStor'

- name: 4) Get the name of the controller/target deployment
shell: >
source ~/.profile; kubectl get deployments -n {{ namespace }}
-l openebs.io/controller=jiva-controller --no-headers
source ~/.profile; kubectl get deployments -n {{ ns }}
-l {{ pod_label }} --no-headers
args:
executable: /bin/bash
delegate_to: "{{groups['kubernetes-kubemasters'].0}}"
register: result_deploy

- name: Set the ctrl deployment name to variable
- name: 4a) Set the ctrl deployment name to variable
set_fact:
ctrl_deploy: "{{ result_deploy.stdout_lines[0].split()[0] }}"

@@ -133,80 +174,82 @@
# failures, verify successful FI via end marker) #
########################################################

- name: Get the resourceVersion of the controller deployment
- name: 4b) Get the resourceVersion of the controller/target deployment
shell: >
source ~/.profile; kubectl get deploy -n {{ namespace }}
source ~/.profile; kubectl get deploy -n {{ ns }}
{{ ctrl_deploy }} -o yaml | grep resourceVersion
| awk '{print $2}' | sed 's|"||g'
args:
executable: /bin/bash
delegate_to: "{{groups['kubernetes-kubemasters'].0}}"
register: rv_bef

- name: Initiate periodic controller failures from chaoskube
- name: 5) Initiate periodic controller/target failure through chaoskube
shell: >
source ~/.profile; kubectl exec {{ chaospod }} -n {{ namespace }}
-- timeout -t {{ chaos_duration }} chaoskube
--labels 'openebs.io/controller=jiva-controller'
--labels '{{ pod_label }}'
--no-dry-run --interval={{ chaos_interval }}s
--debug
--namespaces={{ namespace }}
--namespaces={{ ns }}
args:
executable: /bin/bash
delegate_to: "{{groups['kubernetes-kubemasters'].0}}"
register: chaos_result
ignore_errors: true

- name: Get the resourceVersion of the controller deployment
- name: 5a) Get the resourceVersion of the controller/target deployment after recovery.
shell: >
source ~/.profile; kubectl get deploy -n {{ namespace }}
source ~/.profile; kubectl get deploy -n {{ ns }}
{{ ctrl_deploy }} -o yaml | grep resourceVersion
| awk '{print $2}' | sed 's|"||g'
args:
executable: /bin/bash
delegate_to: "{{groups['kubernetes-kubemasters'].0}}"
register: rv_aft

- name: Compare resourceVersions of controller deployments
- name: 5b) Compare resourceVersions of both current and previous deployment.
debug:
msg: "Verified controller pods were restarted by chaoskube"
failed_when: "rv_bef.stdout | int == rv_aft.stdout | int"
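The pass/fail decision above rests on the deployment's resourceVersion having changed between the pre- and post-chaos snapshots, which indicates the controller/target pods were actually restarted. If the grep/awk/sed pipeline ever proves brittle, a jsonpath query (standard kubectl, names shown as placeholders) returns the same field directly:

  # Sketch: read resourceVersion without text munging.
  kubectl get deploy <ctrl-deploy> -n <ns> -o jsonpath='{.metadata.resourceVersion}'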

########################################################
# VERIFY RESILINCY/FAULT-TOLERATION #
# VERIFY RESILIENCY/FAULT-TOLERANCE #
# (Confirm liveness checks on percona are successful & #
# pod is still in running state) #
########################################################
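The check below reuses deploy_check.yml to confirm percona is still Running with its liveness probe passing after the chaos window. By hand, with the test namespace as a placeholder, that reduces to roughly:

  # Sketch: the percona pod should still be Running after the failures.
  kubectl get pods -n <namespace> -l name=percona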

- include_tasks: "{{utils_path}}/deploy_check.yml"
vars:
ns: "{{ namespace }}"
lkey: name
lvalue: percona

########################################################
# CLEANUP #
# (Tear down application, liveness configmap as well as#
# the FI (chaoskube) infrastructure. Also stop logger) #
########################################################

ns: "{{ namespace }}"
lkey: name
lvalue: percona

- name: Setting pass flag
set_fact:
flag: "Test Passed"
status: "good"
status_id: 1
flag: "Test Passed"
status: "good"
status_id: 1

rescue:

- name: Setting fail flag
set_fact:
flag: "Test Failed"
status: "danger"
status_id: 5

always:

- block:

########################################################
# CLEANUP #
# (Tear down application, liveness configmap as well as#
# the FI (chaoskube) infrastructure. Also stop logger) #
########################################################


- include: test-ctrl-failure-cleanup.yml

- name: Test Cleanup Passed
