From 94061395c88a6f2e933f79d4fddb82e610c347d2 Mon Sep 17 00:00:00 2001 From: litmusbot Date: Tue, 25 May 2021 09:52:40 +0000 Subject: [PATCH] 874433430: version upgraded for chaos-charts --- charts/generic/experiments.yaml | 2666 +++++++++++++++---------------- charts/openebs/experiments.yaml | 1232 +++++++------- 2 files changed, 1949 insertions(+), 1949 deletions(-) diff --git a/charts/generic/experiments.yaml b/charts/generic/experiments.yaml index a11cbfd..879c9b5 100644 --- a/charts/generic/experiments.yaml +++ b/charts/generic/experiments.yaml @@ -1,33 +1,43 @@ apiVersion: litmuschaos.io/v1alpha1 description: message: | - Restart node + Deletes a pod belonging to a deployment/statefulset/daemonset kind: ChaosExperiment metadata: - name: node-restart + name: pod-delete labels: - name: node-restart + name: pod-delete app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest spec: definition: - scope: Cluster + scope: Namespaced permissions: - apiGroups: - "" + - "apps" + - "apps.openshift.io" + - "argoproj.io" - "batch" - "litmuschaos.io" resources: + - "deployments" - "jobs" - "pods" - "pods/log" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" - "pods/exec" - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" - - "secrets" verbs: - "create" - "list" @@ -36,71 +46,144 @@ spec: - "update" - "delete" - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" image: "litmuschaos/go-runner:latest" imagePullPolicy: Always args: - -c - - ./experiments -name node-restart + - ./experiments -name pod-delete command: - /bin/bash env: - - name: SSH_USER - value: 'root' - name: TOTAL_CHAOS_DURATION - value: '60' + value: '15' # Period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' - # PROVIDE THE LIB HERE - # ONLY LITMUS SUPPORTED + - name: FORCE + value: 'true' + + - name: CHAOS_INTERVAL + value: '5' + + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' + - name: LIB - value: 'litmus' + value: 'litmus' - # provide lib image - - name: LIB_IMAGE - value: "litmuschaos/go-runner:latest" - - # ENTER THE TARGET NODE NAME - - name: TARGET_NODE - value: '' - - - name: NODE_LABEL - value: '' - - # ENTER THE TARGET NODE IP - - name: TARGET_NODE_IP + - name: TARGET_PODS value: '' + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + labels: - name: node-restart + name: pod-delete app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest - secrets: - - name: id-rsa - mountPath: /mnt/ ---- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Drain the node where application pod is scheduled + Deletes a pod belonging to a deployment/statefulset/daemonset kind: ChaosExperiment metadata: - name: node-drain + name: k8-pod-delete labels: - name: node-drain + name: k8-pod-delete + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "apps" + - "batch" + - "litmuschaos.io" + resources: + - "deployments" + - "jobs" + - "pods" + - "configmaps" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - 
"patch" + - "update" + - "delete" + - apiGroups: + - "" + resources: + - "nodes" + verbs : + - "get" + - "list" + image: "litmuschaos/chaostoolkit:latest" + args: + - -c + - python /app/chaos/chaostest/kubernetes/k8_wrapper.py; exit 0 + command: + - /bin/bash + env: + - name: CHAOSTOOLKIT_IN_POD + value: 'true' + + - name: FILE + value: 'pod-app-kill-count.json' + + - name: NAME_SPACE + value: '' + + - name: LABEL_NAME + value: '' + + - name: APP_ENDPOINT + value: '' + + - name: PERCENTAGE + value: '50' + + - name: REPORT + value: 'true' + + - name: REPORT_ENDPOINT + value: 'none' + + - name: TEST_NAMESPACE + value: 'default' + + + labels: + name: k8-pod-delete + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Give a cpu spike on a node belonging to a deployment +kind: ChaosExperiment +metadata: + name: node-cpu-hog + labels: + name: node-cpu-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest @@ -112,15 +195,12 @@ spec: - "" - "batch" - "litmuschaos.io" - - "apps" resources: - "jobs" - "pods" - - "events" - "pods/log" - "pods/exec" - - "daemonsets" - - "pods/eviction" + - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -139,36 +219,54 @@ spec: verbs: - "get" - "list" - - "patch" image: "litmuschaos/go-runner:latest" imagePullPolicy: Always args: - -c - - ./experiments -name node-drain + - ./experiments -name node-cpu-hog command: - /bin/bash env: - - - name: TARGET_NODE + + - name: TOTAL_CHAOS_DURATION + value: '60' + + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + ## ENTER THE NUMBER OF CORES OF CPU FOR CPU HOGGING + ## OPTIONAL VALUE IN CASE OF EMPTY VALUE IT WILL TAKE NODE CPU CAPACITY + - name: NODE_CPU_CORE + value: '' + + # ENTER THE COMMA SEPARATED TARGET NODES NAME + - name: TARGET_NODES value: '' - name: NODE_LABEL value: '' - - name: TOTAL_CHAOS_DURATION - value: '60' - - # Provide the LIB here - # Only litmus supported + # PROVIDE THE LIB HERE + # ONLY LITMUS SUPPORTED - name: LIB value: 'litmus' - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:latest' + + ## percentage of total nodes to target + - name: NODES_AFFECTED_PERC value: '' - + + ## it defines the sequence of chaos execution for multiple target nodes + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + labels: - name: node-drain + name: node-cpu-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest @@ -177,120 +275,107 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Injects network packet loss on pods belonging to an app deployment + Fillup Ephemeral Storage of a Resource kind: ChaosExperiment metadata: - name: pod-network-loss + name: disk-fill labels: - name: pod-network-loss + name: disk-fill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest spec: definition: scope: Namespaced - permissions: - - apiGroups: - - "" - - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - 
"deploymentconfigs" - - "rollouts" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "get" - - "list" - - "patch" - - "create" - - "update" - - "delete" - - "deletecollection" + permissions: + - apiGroups: + - "" + - "apps" + - "batch" + - "apps.openshift.io" + - "argoproj.io" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/exec" + - "pods/log" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - "deletecollection" image: "litmuschaos/go-runner:latest" imagePullPolicy: Always args: - -c - - ./experiments -name pod-network-loss + - ./experiments -name disk-fill command: - /bin/bash env: - + - name: TARGET_CONTAINER value: '' - - # provide lib image - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:latest' - - - name: NETWORK_INTERFACE - value: 'eth0' - - - name: TC_IMAGE - value: 'gaiadocker/iproute2' - - - name: NETWORK_PACKET_LOSS_PERCENTAGE - value: '100' #in PERCENTAGE + + - name: FILL_PERCENTAGE + value: '80' - name: TOTAL_CHAOS_DURATION - value: '60' # in seconds + value: '60' - # ime period to wait before and after injection of chaos in sec + # Period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' - # it can be litmus or pumba + # Provide the LIB here + # Only litmus supported - name: LIB value: 'litmus' + # provide the data block size + # supported unit is KB + - name: DATA_BLOCK_SIZE + value: '256' + + - name: TARGET_PODS + value: '' + + - name: EPHEMERAL_STORAGE_MEBIBYTES + value: '' + ## percentage of total pods to target - name: PODS_AFFECTED_PERC value: '' - - name: TARGET_PODS - value: '' + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:latest' - # provide the name of container runtime - # for litmus LIB, it supports docker, containerd, crio - # for pumba LIB, it supports docker only - - name: CONTAINER_RUNTIME - value: 'docker' - - # provide the destination ips - # chaos injection will be triggered for these destination ips - - name: DESTINATION_IPS - value: '' - - # provide the destination hosts - # chaos injection will be triggered for these destination hosts - - name: DESTINATION_HOSTS - value: '' - - # provide the socket file path - - name: SOCKET_PATH - value: '/var/run/docker.sock' + # Provide the container runtime path + # Default set to docker container path + - name: CONTAINER_PATH + value: '/var/lib/docker/containers' ## it defines the sequence of chaos execution for multiple target pods ## supported values: serial, parallel - name: SEQUENCE value: 'parallel' - + labels: - name: pod-network-loss + name: disk-fill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest @@ -417,12 +502,13 @@ spec: --- apiVersion: litmuschaos.io/v1alpha1 description: - message: "Kills a container belonging to an application pod \n" + message: | + Injects cpu consumption on pods belonging to an app deployment kind: ChaosExperiment metadata: - name: container-kill + name: pod-cpu-hog labels: - name: container-kill + name: pod-cpu-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest @@ -432,8 +518,8 @@ spec: permissions: - apiGroups: - "" - - "apps" - "batch" + - "apps" - "apps.openshift.io" - "argoproj.io" - 
"litmuschaos.io" @@ -457,68 +543,61 @@ spec: - "create" - "list" - "get" - - "update" - "patch" + - "update" - "delete" - "deletecollection" image: "litmuschaos/go-runner:latest" imagePullPolicy: Always args: - -c - - ./experiments -name container-kill + - ./experiments -name pod-cpu-hog command: - /bin/bash env: - - - name: TARGET_CONTAINER - value: '' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - # lib can be litmus or pumba - - name: LIB - value: 'litmus' - - - name: TARGET_PODS - value: '' - - # provide the chaos interval - - name: CHAOS_INTERVAL - value: '10' - - - name: SIGNAL - value: 'SIGKILL' - - # provide the socket file path - - name: SOCKET_PATH - value: '/var/run/docker.sock' - - # provide the name of container runtime - # for litmus LIB, it supports docker, containerd, crio - # for pumba LIB, it supports docker only - - name: CONTAINER_RUNTIME - value: 'docker' - - # provide the total chaos duration - name: TOTAL_CHAOS_DURATION - value: '20' - - ## percentage of total pods to target + value: '60' + + ## Number of CPU cores to stress + - name: CPU_CORES + value: '1' + + ## Percentage of total pods to target - name: PODS_AFFECTED_PERC value: '' - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:latest' + ## Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + ## env var that describes the library used to execute the chaos + ## default: litmus. Supported values: litmus, pumba + - name: LIB + value: 'litmus' + + ## It is used in pumba lib only + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:latest' + + ## It is used in pumba lib only + - name: STRESS_IMAGE + value: 'alexeiled/stress-ng:latest-ubuntu' + + # provide the socket file path + # it is used in pumba lib + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + - name: TARGET_PODS + value: '' ## it defines the sequence of chaos execution for multiple target pods ## supported values: serial, parallel - name: SEQUENCE value: 'parallel' - + labels: - name: container-kill + name: pod-cpu-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest @@ -631,42 +710,33 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Fillup Ephemeral Storage of a Resource + Restart node kind: ChaosExperiment metadata: - name: disk-fill + name: node-restart labels: - name: disk-fill + name: node-restart app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest spec: definition: - scope: Namespaced - permissions: + scope: Cluster + permissions: - apiGroups: - "" - - "apps" - "batch" - - "apps.openshift.io" - - "argoproj.io" - "litmuschaos.io" resources: - "jobs" - "pods" - - "pods/exec" - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" + - "pods/exec" - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" + - "secrets" verbs: - "create" - "list" @@ -675,20 +745,23 @@ spec: - "update" - "delete" - "deletecollection" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" image: "litmuschaos/go-runner:latest" imagePullPolicy: Always args: - -c - - ./experiments -name disk-fill + - ./experiments -name node-restart command: - /bin/bash env: - - - name: TARGET_CONTAINER - value: '' - - - name: FILL_PERCENTAGE - value: '80' + - name: SSH_USER + value: 'root' - name: TOTAL_CHAOS_DURATION value: 
'60' @@ -697,55 +770,45 @@ spec: - name: RAMP_TIME value: '' - # Provide the LIB here - # Only litmus supported + # PROVIDE THE LIB HERE + # ONLY LITMUS SUPPORTED - name: LIB value: 'litmus' - # provide the data block size - # supported unit is KB - - name: DATA_BLOCK_SIZE - value: '256' - - - name: TARGET_PODS - value: '' - - - name: EPHEMERAL_STORAGE_MEBIBYTES - value: '' - - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' - + # provide lib image - name: LIB_IMAGE - value: 'litmuschaos/go-runner:latest' + value: "litmuschaos/go-runner:latest" - # Provide the container runtime path - # Default set to docker container path - - name: CONTAINER_PATH - value: '/var/lib/docker/containers' + # ENTER THE TARGET NODE NAME + - name: TARGET_NODE + value: '' - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' + - name: NODE_LABEL + value: '' + + # ENTER THE TARGET NODE IP + - name: TARGET_NODE_IP + value: '' labels: - name: disk-fill + name: node-restart app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest + secrets: + - name: id-rsa + mountPath: /mnt/ --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Injects cpu consumption on pods belonging to an app deployment + Inject network packet corruption into application pod kind: ChaosExperiment metadata: - name: pod-cpu-hog + name: pod-network-corruption labels: - name: pod-cpu-hog + name: pod-network-corruption app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest @@ -764,7 +827,6 @@ spec: - "jobs" - "pods" - "pods/log" - - "events" - "replicationcontrollers" - "deployments" - "statefulsets" @@ -773,6 +835,119 @@ spec: - "deploymentconfigs" - "rollouts" - "pods/exec" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "delete" + - "list" + - "patch" + - "update" + - "get" + - "deletecollection" + image: "litmuschaos/go-runner:latest" + imagePullPolicy: Always + args: + - -c + - ./experiments -name pod-network-corruption + command: + - /bin/bash + env: + + - name: TARGET_CONTAINER + value: '' + + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:latest' + + - name: NETWORK_INTERFACE + value: 'eth0' + + - name: TC_IMAGE + value: 'gaiadocker/iproute2' + + - name: NETWORK_PACKET_CORRUPTION_PERCENTAGE + value: '100' #in PERCENTAGE + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + # Time period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + # lib can be litmus or pumba + - name: LIB + value: 'litmus' + + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' + + - name: TARGET_PODS + value: '' + + # provide the name of container runtime + # for litmus LIB, it supports docker, containerd, crio + # for pumba LIB, it supports docker only + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the destination ips + # chaos injection will be triggered for these destination ips + - name: DESTINATION_IPS + value: '' + + # provide the destination hosts + # chaos injection will be triggered for these destination hosts + - name: DESTINATION_HOSTS + value: '' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + 
- name: SEQUENCE + value: 'parallel' + + labels: + name: pod-network-corruption + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Detaching a persistent disk from a node/instance. Supports only for AWS and GCP +kind: ChaosExperiment +metadata: + name: disk-loss + labels: + name: disk-loss + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "batch" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "events" + - "pods/log" + - "secrets" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -783,61 +958,59 @@ spec: - "patch" - "update" - "delete" - - "deletecollection" - image: "litmuschaos/go-runner:latest" + image: "litmuschaos/ansible-runner:latest" imagePullPolicy: Always args: - -c - - ./experiments -name pod-cpu-hog + - ansible-playbook ./experiments/generic/disk_loss/disk_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + - name: TOTAL_CHAOS_DURATION - value: '60' + value: '15' - ## Number of CPU cores to stress - - name: CPU_CORES - value: '1' - - ## Percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' - - ## Period to wait before and after injection of chaos in sec + # Period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' - ## env var that describes the library used to execute the chaos - ## default: litmus. Supported values: litmus, pumba - - name: LIB - value: 'litmus' + - name: APP_CHECK + value: 'true' + + # GKE and AWS supported + - name: CLOUD_PLATFORM + value: 'GKE' - ## It is used in pumba lib only - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:latest' - - ## It is used in pumba lib only - - name: STRESS_IMAGE - value: 'alexeiled/stress-ng:latest-ubuntu' - - # provide the socket file path - # it is used in pumba lib - - name: SOCKET_PATH - value: '/var/run/docker.sock' - - - name: TARGET_PODS + - name: PROJECT_ID value: '' - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - + - name: NODE_NAME + value: '' + + - name: DISK_NAME + value: '' + + # provide the LIB + # only litmus supported + - name: LIB + value: 'litmus' + + - name: ZONE_NAME + value: '' + + - name: DEVICE_NAME + value: '' + labels: - name: pod-cpu-hog + name: disk-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest + secrets: + - name: cloud-secret + mountPath: /tmp/ --- apiVersion: litmuschaos.io/v1alpha1 @@ -957,12 +1130,203 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Kills the kubelet service on the application node to check the resiliency. 
+ Scale the application replicas and test the node autoscaling on cluster kind: ChaosExperiment metadata: - name: kubelet-service-kill + name: pod-autoscaler labels: - name: kubelet-service-kill + name: pod-autoscaler + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "batch" + - "apps" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "pods/exec" + - "deployments" + - "statefulsets" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - "deletecollection" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + - "create" + - "patch" + - "update" + image: "litmuschaos/go-runner:latest" + imagePullPolicy: Always + args: + - -c + - ./experiments -name pod-autoscaler + command: + - /bin/bash + env: + + - name: TOTAL_CHAOS_DURATION + value: '60' + + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + # Number of replicas to scale + - name: REPLICA_COUNT + value: '5' + + # PROVIDE THE LIB HERE + # ONLY LITMUS SUPPORTED + - name: LIB + value: 'litmus' + + labels: + name: pod-autoscaler + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: "Kills a container belonging to an application pod \n" +kind: ChaosExperiment +metadata: + name: container-kill + labels: + name: container-kill + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "apps" + - "batch" + - "apps.openshift.io" + - "argoproj.io" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "events" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "pods/exec" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "update" + - "patch" + - "delete" + - "deletecollection" + image: "litmuschaos/go-runner:latest" + imagePullPolicy: Always + args: + - -c + - ./experiments -name container-kill + command: + - /bin/bash + env: + + - name: TARGET_CONTAINER + value: '' + + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + # lib can be litmus or pumba + - name: LIB + value: 'litmus' + + - name: TARGET_PODS + value: '' + + # provide the chaos interval + - name: CHAOS_INTERVAL + value: '10' + + - name: SIGNAL + value: 'SIGKILL' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + # provide the name of container runtime + # for litmus LIB, it supports docker, containerd, crio + # for pumba LIB, it supports docker only + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the total chaos duration + - name: TOTAL_CHAOS_DURATION + value: '20' + + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' + + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:latest' + + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + + labels: + name: container-kill + 
app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Give a memory hog on a node belonging to a deployment +kind: ChaosExperiment +metadata: + name: node-memory-hog + labels: + name: node-memory-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest @@ -1002,34 +1366,58 @@ spec: imagePullPolicy: Always args: - -c - - ./experiments -name kubelet-service-kill + - ./experiments -name node-memory-hog command: - /bin/bash env: - + - name: TOTAL_CHAOS_DURATION - value: '90' # in seconds + value: '120' + + ## Specify the size as percent of total node capacity Ex: '30' + ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_MEBIBYTES empty + - name: MEMORY_CONSUMPTION_PERCENTAGE + value: '' + + ## Specify the amount of memory to be consumed in mebibytes + ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_PERCENTAGE empty + - name: MEMORY_CONSUMPTION_MEBIBYTES + value: '' + + - name: NUMBER_OF_WORKERS + value: '1' + + # ENTER THE COMMA SEPARATED TARGET NODES NAME + - name: TARGET_NODES + value: '' + + - name: NODE_LABEL + value: '' # Period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' + # Provide the LIB here + # Only litmus supported - name: LIB value: 'litmus' - - name: NODE_LABEL - value: '' - # provide lib image - name: LIB_IMAGE - value: 'ubuntu:16.04' - - # provide the target node name - - name: TARGET_NODE + value: 'litmuschaos/go-runner:latest' + + ## percentage of total nodes to target + - name: NODES_AFFECTED_PERC value: '' + ## it defines the sequence of chaos execution for multiple target nodes + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + labels: - name: kubelet-service-kill + name: node-memory-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest @@ -1038,12 +1426,12 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Deletes a pod belonging to a deployment/statefulset/daemonset + Pod DNS Error injects dns failure/error in target pod containers kind: ChaosExperiment metadata: - name: k8-pod-delete + name: pod-dns-error labels: - name: k8-pod-delete + name: pod-dns-error app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest @@ -1053,14 +1441,24 @@ spec: permissions: - apiGroups: - "" - - "apps" - "batch" + - "apps" + - "apps.openshift.io" + - "argoproj.io" - "litmuschaos.io" resources: - - "deployments" - "jobs" - "pods" - - "configmaps" + - "pods/log" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "pods/exec" + - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -1071,136 +1469,62 @@ spec: - "patch" - "update" - "delete" - - apiGroups: - - "" - resources: - - "nodes" - verbs : - - "get" - - "list" - image: "litmuschaos/chaostoolkit:latest" + - "deletecollection" + image: "litmuschaos/go-runner:latest" args: - - -c - - python /app/chaos/chaostest/kubernetes/k8_wrapper.py; exit 0 + - -c + - ./experiments -name pod-dns-error command: - - /bin/bash + - /bin/bash env: - - name: CHAOSTOOLKIT_IN_POD - value: 'true' + - name: TARGET_CONTAINER + value: "" - - name: FILE - value: 'pod-app-kill-count.json' + # provide lib image + - name: LIB_IMAGE + value: 
"litmuschaos/go-runner:latest" - - name: NAME_SPACE - value: '' + - name: TOTAL_CHAOS_DURATION + value: "60" # in seconds - - name: LABEL_NAME - value: '' + # Time period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: "" - - name: APP_ENDPOINT - value: '' + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: "" - - name: PERCENTAGE - value: '50' + - name: TARGET_PODS + value: "" - - name: REPORT - value: 'true' + # provide the name of container runtime, it supports docker, containerd, crio + - name: CONTAINER_RUNTIME + value: "docker" - - name: REPORT_ENDPOINT - value: 'none' - - - name: TEST_NAMESPACE - value: 'default' + # provide the socket file path + - name: SOCKET_PATH + value: "/var/run/docker.sock" + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: "parallel" + + # list of the target hostnames or kewywords eg. '["litmuschaos","chaosnative.io"]' . If empty all hostnames are targets + - name: TARGET_HOSTNAMES + value: "" + + # can be either exact or substring, determines whether the dns query has to match exactly with one of the targets or can have any of the targets as substring + - name: MATCH_SCHEME + value: "exact" labels: - name: k8-pod-delete + experiment: pod-dns-error app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Deletes a pod belonging to a deployment/statefulset/daemonset -kind: ChaosExperiment -metadata: - name: k8-service-kill - labels: - name: k8-service-kill - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest -spec: - definition: - scope: Namespaced - permissions: - - apiGroups: - - "" - - "apps" - - "batch" - - "litmuschaos.io" - resources: - - "deployments" - - "jobs" - - "pods" - - "configmaps" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - apiGroups: - - "" - resources: - - "nodes" - verbs : - - "get" - - "list" - labels: - name: k8-service-kill - app.kubernetes.io/part-of: litmus - image: "litmuschaos/chaostoolkit:latest" - args: - - -c - - python /app/chaos/chaostest/kubernetes/k8_wrapper.py; exit 0 - command: - - /bin/bash - env: - - name: CHAOSTOOLKIT_IN_POD - value: 'true' - - - name: FILE - value: 'service-app-kill-health.json' - - - name: NAME_SPACE - value: '' - - - name: LABEL_NAME - value: '' - - - name: APP_ENDPOINT - value: '' - - - name: PERCENTAGE - value: '50' - - - name: REPORT - value: 'true' - - - name: REPORT_ENDPOINT - value: 'none' - - - name: TEST_NAMESPACE - value: 'default' - - --- apiVersion: litmuschaos.io/v1alpha1 description: @@ -1371,16 +1695,132 @@ spec: app.kubernetes.io/version: latest +--- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Scale the application replicas and test the node autoscaling on cluster + IO stress on a app pods belonging to an app deployment kind: ChaosExperiment metadata: - name: pod-autoscaler + name: pod-io-stress labels: - name: pod-autoscaler + name: pod-io-stress + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "batch" + - "apps" + - "apps.openshift.io" + - "argoproj.io" + - "litmuschaos.io" + 
resources: + - "jobs" + - "pods" + - "pods/log" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "pods/exec" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - "deletecollection" + image: "litmuschaos/go-runner:latest" + imagePullPolicy: Always + args: + - -c + - ./experiments -name pod-io-stress + command: + - /bin/bash + env: + - name: TOTAL_CHAOS_DURATION + value: '120' + + ## specify the size as percentage of free space on the file system + ## default value 90 (in percentage) + - name: FILESYSTEM_UTILIZATION_PERCENTAGE + value: '10' + + ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space + ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty + - name: FILESYSTEM_UTILIZATION_BYTES + value: '' + + ## Total number of workers default value is 4 + - name: NUMBER_OF_WORKERS + value: '4' + + ## Percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' + + # provide volume mount path + - name: VOLUME_MOUNT_PATH + value: '' + + ## specify the comma separated target pods + - name: TARGET_PODS + value: '' + + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + # Provide the LIB here + # Only pumba supported + - name: LIB + value: 'pumba' + + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:latest' + + # provide the socket file path + # it is used in pumba lib + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + + labels: + name: pod-io-stress + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Taint the node where application pod is scheduled +kind: ChaosExperiment +metadata: + name: node-taint + labels: + name: node-taint app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest @@ -1396,10 +1836,359 @@ spec: resources: - "jobs" - "pods" + - "events" - "pods/log" - "pods/exec" + - "daemonsets" + - "pods/eviction" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - "deletecollection" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + - "patch" + - "update" + image: "litmuschaos/go-runner:latest" + imagePullPolicy: Always + args: + - -c + - ./experiments -name node-taint + command: + - /bin/bash + env: + + - name: TARGET_NODE + value: '' + + - name: NODE_LABEL + value: '' + + - name: TOTAL_CHAOS_DURATION + value: '60' + + # Provide the LIB here + # Only litmus supported + - name: LIB + value: 'litmus' + + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + # set taint label & effect + # key=value:effect or key:effect + - name: TAINTS + value: '' + + labels: + name: node-taint + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Drain the node where application pod is scheduled +kind: 
ChaosExperiment +metadata: + name: node-drain + labels: + name: node-drain + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "batch" + - "litmuschaos.io" + - "apps" + resources: + - "jobs" + - "pods" + - "events" + - "pods/log" + - "pods/exec" + - "daemonsets" + - "pods/eviction" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - "deletecollection" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + - "patch" + image: "litmuschaos/go-runner:latest" + imagePullPolicy: Always + args: + - -c + - ./experiments -name node-drain + command: + - /bin/bash + env: + + - name: TARGET_NODE + value: '' + + - name: NODE_LABEL + value: '' + + - name: TOTAL_CHAOS_DURATION + value: '60' + + # Provide the LIB here + # Only litmus supported + - name: LIB + value: 'litmus' + + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + labels: + name: node-drain + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Pod DNS Spoof can spoof particular dns requests in target pod container to desired target hostnames +kind: ChaosExperiment +metadata: + name: pod-dns-spoof + labels: + name: pod-dns-spoof + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "batch" + - "apps" + - "apps.openshift.io" + - "argoproj.io" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "replicationcontrollers" - "deployments" - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "pods/exec" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - "deletecollection" + image: "litmuschaos/go-runner:latest" + args: + - -c + - ./experiments -name pod-dns-spoof + command: + - /bin/bash + env: + - name: TARGET_CONTAINER + value: "" + + # provide lib image + - name: LIB_IMAGE + value: "litmuschaos/go-runner:latest" + + - name: TOTAL_CHAOS_DURATION + value: "60" # in seconds + + # Time period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: "" + + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: "" + + - name: TARGET_PODS + value: "" + + # provide the name of container runtime, it supports docker, containerd, crio + - name: CONTAINER_RUNTIME + value: "docker" + + # provide the socket file path + - name: SOCKET_PATH + value: "/var/run/docker.sock" + + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: "parallel" + + # map of the target hostnames eg. '{"abc.com":"spoofabc.com"}' . 
If empty no queries will be spoofed + - name: SPOOF_MAP + value: "" + + labels: + experiment: pod-dns-spoof + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Deletes a pod belonging to a deployment/statefulset/daemonset +kind: ChaosExperiment +metadata: + name: k8-service-kill + labels: + name: k8-service-kill + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "apps" + - "batch" + - "litmuschaos.io" + resources: + - "deployments" + - "jobs" + - "pods" + - "configmaps" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - apiGroups: + - "" + resources: + - "nodes" + verbs : + - "get" + - "list" + labels: + name: k8-service-kill + app.kubernetes.io/part-of: litmus + image: "litmuschaos/chaostoolkit:latest" + args: + - -c + - python /app/chaos/chaostest/kubernetes/k8_wrapper.py; exit 0 + command: + - /bin/bash + env: + - name: CHAOSTOOLKIT_IN_POD + value: 'true' + + - name: FILE + value: 'service-app-kill-health.json' + + - name: NAME_SPACE + value: '' + + - name: LABEL_NAME + value: '' + + - name: APP_ENDPOINT + value: '' + + - name: PERCENTAGE + value: '50' + + - name: REPORT + value: 'true' + + - name: REPORT_ENDPOINT + value: 'none' + + - name: TEST_NAMESPACE + value: 'default' + + +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Kills the kubelet service on the application node to check the resiliency. +kind: ChaosExperiment +metadata: + name: kubelet-service-kill + labels: + name: kubelet-service-kill + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "batch" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "pods/exec" - "events" - "chaosengines" - "chaosexperiments" @@ -1419,36 +2208,160 @@ spec: verbs: - "get" - "list" - - "create" - - "patch" - - "update" image: "litmuschaos/go-runner:latest" imagePullPolicy: Always args: - -c - - ./experiments -name pod-autoscaler + - ./experiments -name kubelet-service-kill command: - /bin/bash env: - + - name: TOTAL_CHAOS_DURATION - value: '60' + value: '90' # in seconds # Period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' - # Number of replicas to scale - - name: REPLICA_COUNT - value: '5' - - # PROVIDE THE LIB HERE - # ONLY LITMUS SUPPORTED - name: LIB value: 'litmus' - + + - name: NODE_LABEL + value: '' + + # provide lib image + - name: LIB_IMAGE + value: 'ubuntu:16.04' + + # provide the target node name + - name: TARGET_NODE + value: '' + labels: - name: pod-autoscaler + name: kubelet-service-kill + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Injects network packet loss on pods belonging to an app deployment +kind: ChaosExperiment +metadata: + name: pod-network-loss + labels: + name: pod-network-loss + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - 
"batch" + - "apps" + - "apps.openshift.io" + - "argoproj.io" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "pods/exec" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "get" + - "list" + - "patch" + - "create" + - "update" + - "delete" + - "deletecollection" + image: "litmuschaos/go-runner:latest" + imagePullPolicy: Always + args: + - -c + - ./experiments -name pod-network-loss + command: + - /bin/bash + env: + + - name: TARGET_CONTAINER + value: '' + + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:latest' + + - name: NETWORK_INTERFACE + value: 'eth0' + + - name: TC_IMAGE + value: 'gaiadocker/iproute2' + + - name: NETWORK_PACKET_LOSS_PERCENTAGE + value: '100' #in PERCENTAGE + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + # ime period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + # it can be litmus or pumba + - name: LIB + value: 'litmus' + + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' + + - name: TARGET_PODS + value: '' + + # provide the name of container runtime + # for litmus LIB, it supports docker, containerd, crio + # for pumba LIB, it supports docker only + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the destination ips + # chaos injection will be triggered for these destination ips + - name: DESTINATION_IPS + value: '' + + # provide the destination hosts + # chaos injection will be triggered for these destination hosts + - name: DESTINATION_HOSTS + value: '' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + + labels: + name: pod-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest @@ -1576,916 +2489,3 @@ spec: app.kubernetes.io/version: latest --- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Deletes a pod belonging to a deployment/statefulset/daemonset -kind: ChaosExperiment -metadata: - name: pod-delete - labels: - name: pod-delete - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest -spec: - definition: - scope: Namespaced - permissions: - - apiGroups: - - "" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - - "batch" - - "litmuschaos.io" - resources: - - "deployments" - - "jobs" - - "pods" - - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - "deletecollection" - image: "litmuschaos/go-runner:latest" - imagePullPolicy: Always - args: - - -c - - ./experiments -name pod-delete - command: - - /bin/bash - env: - - - name: TOTAL_CHAOS_DURATION - value: '15' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - - name: FORCE - value: 'true' - - - name: CHAOS_INTERVAL - value: '5' - - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' - - - name: LIB - value: 
'litmus' - - - name: TARGET_PODS - value: '' - - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - - labels: - name: pod-delete - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Give a cpu spike on a node belonging to a deployment -kind: ChaosExperiment -metadata: - name: node-cpu-hog - labels: - name: node-cpu-hog - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest -spec: - definition: - scope: Cluster - permissions: - - apiGroups: - - "" - - "batch" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "pods/log" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" - image: "litmuschaos/go-runner:latest" - imagePullPolicy: Always - args: - - -c - - ./experiments -name node-cpu-hog - command: - - /bin/bash - env: - - - name: TOTAL_CHAOS_DURATION - value: '60' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - ## ENTER THE NUMBER OF CORES OF CPU FOR CPU HOGGING - ## OPTIONAL VALUE IN CASE OF EMPTY VALUE IT WILL TAKE NODE CPU CAPACITY - - name: NODE_CPU_CORE - value: '' - - # ENTER THE COMMA SEPARATED TARGET NODES NAME - - name: TARGET_NODES - value: '' - - - name: NODE_LABEL - value: '' - - # PROVIDE THE LIB HERE - # ONLY LITMUS SUPPORTED - - name: LIB - value: 'litmus' - - # provide lib image - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:latest' - - ## percentage of total nodes to target - - name: NODES_AFFECTED_PERC - value: '' - - ## it defines the sequence of chaos execution for multiple target nodes - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - - labels: - name: node-cpu-hog - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Inject network packet corruption into application pod -kind: ChaosExperiment -metadata: - name: pod-network-corruption - labels: - name: pod-network-corruption - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest -spec: - definition: - scope: Namespaced - permissions: - - apiGroups: - - "" - - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "delete" - - "list" - - "patch" - - "update" - - "get" - - "deletecollection" - image: "litmuschaos/go-runner:latest" - imagePullPolicy: Always - args: - - -c - - ./experiments -name pod-network-corruption - command: - - /bin/bash - env: - - - name: TARGET_CONTAINER - value: '' - - # provide lib image - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:latest' - - - name: NETWORK_INTERFACE - value: 'eth0' - - - name: TC_IMAGE - value: 'gaiadocker/iproute2' - - - name: 
NETWORK_PACKET_CORRUPTION_PERCENTAGE - value: '100' #in PERCENTAGE - - - name: TOTAL_CHAOS_DURATION - value: '60' # in seconds - - # Time period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - # lib can be litmus or pumba - - name: LIB - value: 'litmus' - - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' - - - name: TARGET_PODS - value: '' - - # provide the name of container runtime - # for litmus LIB, it supports docker, containerd, crio - # for pumba LIB, it supports docker only - - name: CONTAINER_RUNTIME - value: 'docker' - - # provide the destination ips - # chaos injection will be triggered for these destination ips - - name: DESTINATION_IPS - value: '' - - # provide the destination hosts - # chaos injection will be triggered for these destination hosts - - name: DESTINATION_HOSTS - value: '' - - # provide the socket file path - - name: SOCKET_PATH - value: '/var/run/docker.sock' - - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - - labels: - name: pod-network-corruption - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - ---- ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - IO stress on a app pods belonging to an app deployment -kind: ChaosExperiment -metadata: - name: pod-io-stress - labels: - name: pod-io-stress - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest -spec: - definition: - scope: Namespaced - permissions: - - apiGroups: - - "" - - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - "deletecollection" - image: "litmuschaos/go-runner:latest" - imagePullPolicy: Always - args: - - -c - - ./experiments -name pod-io-stress - command: - - /bin/bash - env: - - name: TOTAL_CHAOS_DURATION - value: '120' - - ## specify the size as percentage of free space on the file system - ## default value 90 (in percentage) - - name: FILESYSTEM_UTILIZATION_PERCENTAGE - value: '10' - - ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space - ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty - - name: FILESYSTEM_UTILIZATION_BYTES - value: '' - - ## Total number of workers default value is 4 - - name: NUMBER_OF_WORKERS - value: '4' - - ## Percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' - - # provide volume mount path - - name: VOLUME_MOUNT_PATH - value: '' - - ## specify the comma separated target pods - - name: TARGET_PODS - value: '' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - # Provide the LIB here - # Only pumba supported - - name: LIB - value: 'pumba' - - # provide lib image - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:latest' - - # provide the socket file path - # it is used in pumba lib - - name: SOCKET_PATH - value: '/var/run/docker.sock' - - ## it defines the sequence of chaos execution for multiple target pods - ## 
supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - - labels: - name: pod-io-stress - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Pod DNS Spoof can spoof particular dns requests in target pod container to desired target hostnames -kind: ChaosExperiment -metadata: - name: pod-dns-spoof - labels: - name: pod-dns-spoof - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest -spec: - definition: - scope: Namespaced - permissions: - - apiGroups: - - "" - - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - "deletecollection" - image: "litmuschaos/go-runner:latest" - args: - - -c - - ./experiments -name pod-dns-spoof - command: - - /bin/bash - env: - - name: TARGET_CONTAINER - value: "" - - # provide lib image - - name: LIB_IMAGE - value: "litmuschaos/go-runner:latest" - - - name: TOTAL_CHAOS_DURATION - value: "60" # in seconds - - # Time period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: "" - - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: "" - - - name: TARGET_PODS - value: "" - - # provide the name of container runtime, it supports docker, containerd, crio - - name: CONTAINER_RUNTIME - value: "docker" - - # provide the socket file path - - name: SOCKET_PATH - value: "/var/run/docker.sock" - - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: "parallel" - - # map of the target hostnames eg. '{"abc.com":"spoofabc.com"}' . If empty no queries will be spoofed - - name: SPOOF_MAP - value: "" - - labels: - experiment: pod-dns-spoof - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Detaching a persistent disk from a node/instance. 
Supports only for AWS and GCP -kind: ChaosExperiment -metadata: - name: disk-loss - labels: - name: disk-loss - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest -spec: - definition: - scope: Cluster - permissions: - - apiGroups: - - "" - - "batch" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "events" - - "pods/log" - - "secrets" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - image: "litmuschaos/ansible-runner:latest" - imagePullPolicy: Always - args: - - -c - - ansible-playbook ./experiments/generic/disk_loss/disk_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 - command: - - /bin/bash - env: - - name: ANSIBLE_STDOUT_CALLBACK - value: 'default' - - - name: TOTAL_CHAOS_DURATION - value: '15' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - - name: APP_CHECK - value: 'true' - - # GKE and AWS supported - - name: CLOUD_PLATFORM - value: 'GKE' - - - name: PROJECT_ID - value: '' - - - name: NODE_NAME - value: '' - - - name: DISK_NAME - value: '' - - # provide the LIB - # only litmus supported - - name: LIB - value: 'litmus' - - - name: ZONE_NAME - value: '' - - - name: DEVICE_NAME - value: '' - - labels: - name: disk-loss - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - secrets: - - name: cloud-secret - mountPath: /tmp/ - ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Give a memory hog on a node belonging to a deployment -kind: ChaosExperiment -metadata: - name: node-memory-hog - labels: - name: node-memory-hog - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest -spec: - definition: - scope: Cluster - permissions: - - apiGroups: - - "" - - "batch" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "pods/log" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" - image: "litmuschaos/go-runner:latest" - imagePullPolicy: Always - args: - - -c - - ./experiments -name node-memory-hog - command: - - /bin/bash - env: - - - name: TOTAL_CHAOS_DURATION - value: '120' - - ## Specify the size as percent of total node capacity Ex: '30' - ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_MEBIBYTES empty - - name: MEMORY_CONSUMPTION_PERCENTAGE - value: '' - - ## Specify the amount of memory to be consumed in mebibytes - ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_PERCENTAGE empty - - name: MEMORY_CONSUMPTION_MEBIBYTES - value: '' - - - name: NUMBER_OF_WORKERS - value: '1' - - # ENTER THE COMMA SEPARATED TARGET NODES NAME - - name: TARGET_NODES - value: '' - - - name: NODE_LABEL - value: '' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - # Provide the LIB here - # Only litmus supported - - name: LIB - value: 'litmus' - - # provide lib image - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:latest' - - ## percentage of total nodes to target - - name: NODES_AFFECTED_PERC - value: '' - - ## it defines the sequence of chaos execution for multiple target nodes - ## supported values: serial, parallel - - name: SEQUENCE - 
value: 'parallel' - - labels: - name: node-memory-hog - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - ---- ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Taint the node where application pod is scheduled -kind: ChaosExperiment -metadata: - name: node-taint - labels: - name: node-taint - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest -spec: - definition: - scope: Cluster - permissions: - - apiGroups: - - "" - - "batch" - - "apps" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "events" - - "pods/log" - - "pods/exec" - - "daemonsets" - - "pods/eviction" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" - - "patch" - - "update" - image: "litmuschaos/go-runner:latest" - imagePullPolicy: Always - args: - - -c - - ./experiments -name node-taint - command: - - /bin/bash - env: - - - name: TARGET_NODE - value: '' - - - name: NODE_LABEL - value: '' - - - name: TOTAL_CHAOS_DURATION - value: '60' - - # Provide the LIB here - # Only litmus supported - - name: LIB - value: 'litmus' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - # set taint label & effect - # key=value:effect or key:effect - - name: TAINTS - value: '' - - labels: - name: node-taint - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Pod DNS Error injects dns failure/error in target pod containers -kind: ChaosExperiment -metadata: - name: pod-dns-error - labels: - name: pod-dns-error - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest -spec: - definition: - scope: Namespaced - permissions: - - apiGroups: - - "" - - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - "deletecollection" - image: "litmuschaos/go-runner:latest" - args: - - -c - - ./experiments -name pod-dns-error - command: - - /bin/bash - env: - - name: TARGET_CONTAINER - value: "" - - # provide lib image - - name: LIB_IMAGE - value: "litmuschaos/go-runner:latest" - - - name: TOTAL_CHAOS_DURATION - value: "60" # in seconds - - # Time period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: "" - - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: "" - - - name: TARGET_PODS - value: "" - - # provide the name of container runtime, it supports docker, containerd, crio - - name: CONTAINER_RUNTIME - value: "docker" - - # provide the socket file path - - name: SOCKET_PATH - value: "/var/run/docker.sock" - - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: "parallel" - - # list of the target hostnames or kewywords eg. 
'["litmuschaos","chaosnative.io"]' . If empty all hostnames are targets - - name: TARGET_HOSTNAMES - value: "" - - # can be either exact or substring, determines whether the dns query has to match exactly with one of the targets or can have any of the targets as substring - - name: MATCH_SCHEME - value: "exact" - - labels: - experiment: pod-dns-error - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - ---- diff --git a/charts/openebs/experiments.yaml b/charts/openebs/experiments.yaml index 282ebd9..dc8a5d8 100644 --- a/charts/openebs/experiments.yaml +++ b/charts/openebs/experiments.yaml @@ -1,127 +1,42 @@ ---- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Kill all openebs control plane pod and check if gets scheduled again + Network loss to target pod belonging to a OpenEBS PVC kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-control-plane-chaos + name: openebs-target-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest - name: openebs-control-plane-chaos -spec: - definition: - scope: Namespaced - permissions: - - apiGroups: - - "" - - "litmuschaos.io" - - "batch" - - "apps" - resources: - - "pods" - - "pods/log" - - "deployments" - - "events" - - "jobs" - - "configmaps" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" - - image: "litmuschaos/ansible-runner:latest" - imagePullPolicy: Always - args: - - -c - - ansible-playbook ./experiments/openebs/openebs-control-plane-chaos/openebs_control_plane_chaos_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 - command: - - /bin/bash - env: - - name: ANSIBLE_STDOUT_CALLBACK - value: 'default' - - - name: OPENEBS_NAMESPACE - value: 'openebs' - - ## Period to wait before injection of chaos - - name: RAMP_TIME - value: '' - - - name: FORCE - value: '' - - ## env var that describes the library used to execute the chaos - ## default: litmus. 
Supported values: litmus, powerfulseal - - name: LIB - value: '' - - labels: - name: openebs-control-plane-chaos - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Network loss to pool pod belonging to a OpenEBS PVC -kind: ChaosExperiment -metadata: - labels: - litmuschaos.io/name: openebs - name: openebs-pool-network-loss - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest - name: openebs-pool-network-loss + name: openebs-target-network-loss spec: definition: scope: Cluster permissions: - apiGroups: - "" - - "apps" - - "litmuschaos.io" - - "batch" - "extensions" + - "apps" + - "batch" + - "litmuschaos.io" - "storage.k8s.io" - - "openebs.io" resources: + - "jobs" - "pods" + - "events" + - "services" - "pods/log" - "pods/exec" - - "events" - - "jobs" - "configmaps" - - "services" + - "secrets" - "persistentvolumeclaims" - "storageclasses" - - "persistentvolumeclaims" - "persistentvolumes" - "chaosengines" - "chaosexperiments" - "chaosresults" - - "cstorpools" - - "cstorvolumereplicas" - - "replicasets" verbs: - "create" - "get" @@ -133,7 +48,7 @@ spec: imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-pool-network-loss/openebs_pool_network_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-target-network-loss/openebs_target_network_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: @@ -143,14 +58,17 @@ spec: - name: OPENEBS_NAMESPACE value: 'openebs' + - name: APP_PVC + value: '' + + - name: TC_IMAGE + value: 'gaiadocker/iproute2' + # only pumba supported # For pumba image use : gaiaadm/pumba:0.6.5 - name: LIB_IMAGE value: 'gaiaadm/pumba:0.6.5' - - name: TC_IMAGE - value: 'gaiadocker/iproute2' - - name: NETWORK_PACKET_LOSS_PERCENTAGE value: '100' # in percentage @@ -167,105 +85,13 @@ spec: value: '' labels: - name: openebs-pool-network-loss + name: openebs-target-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest - ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Network delay to pool pod belonging to a OpenEBS PVC - This experiment is using pumba lib for network chaos -kind: ChaosExperiment -metadata: - labels: - litmuschaos.io/name: openebs - name: openebs-pool-network-delay - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest - name: openebs-pool-network-delay -spec: - definition: - scope: Cluster - permissions: - - apiGroups: - - "" - - "apps" - - "litmuschaos.io" - - "batch" - - "extensions" - - "storage.k8s.io" - - "openebs.io" - resources: - - "pods" - - "pods/exec" - - "jobs" - - "pods/log" - - "events" - - "configmaps" - - "services" - - "persistentvolumeclaims" - - "storageclasses" - - "persistentvolumes" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - - "cstorpools" - - "cstorvolumereplicas" - - "replicasets" - verbs: - - "create" - - "get" - - "list" - - "patch" - - "update" - - "delete" - image: "litmuschaos/ansible-runner:latest" - imagePullPolicy: Always - args: - - -c - - ansible-playbook ./experiments/openebs/openebs-pool-network-delay/openebs_pool_network_delay_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 - command: - - /bin/bash - env: - - name: 
ANSIBLE_STDOUT_CALLBACK - value: 'default' - - - name: OPENEBS_NAMESPACE - value: 'openebs' - - # only pumba supported - # For pumba image use : gaiaadm/pumba:0.6.5 - - name: LIB_IMAGE - value: 'gaiaadm/pumba:0.6.5' - - # in milliseconds - - name: NETWORK_DELAY - value: '60000' - - - name: TC_IMAGE - value: 'gaiadocker/iproute2' - - - name: TOTAL_CHAOS_DURATION - value: '60' # in seconds - - - name: LIVENESS_APP_LABEL - value: '' - - - name: LIVENESS_APP_NAMESPACE - value: '' - - - name: DATA_PERSISTENCE - value: '' - - labels: - name: openebs-pool-network-delay - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest + #configmaps: + #- name: openebs-target-network-loss + # mountPath: /mnt --- --- @@ -382,6 +208,583 @@ spec: #- name: openebs-target-container-failure # mountPath: /mnt +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Network delay to pool pod belonging to a OpenEBS PVC + This experiment is using pumba lib for network chaos +kind: ChaosExperiment +metadata: + labels: + litmuschaos.io/name: openebs + name: openebs-pool-network-delay + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest + name: openebs-pool-network-delay +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "apps" + - "litmuschaos.io" + - "batch" + - "extensions" + - "storage.k8s.io" + - "openebs.io" + resources: + - "pods" + - "pods/exec" + - "jobs" + - "pods/log" + - "events" + - "configmaps" + - "services" + - "persistentvolumeclaims" + - "storageclasses" + - "persistentvolumes" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + - "cstorpools" + - "cstorvolumereplicas" + - "replicasets" + verbs: + - "create" + - "get" + - "list" + - "patch" + - "update" + - "delete" + image: "litmuschaos/ansible-runner:latest" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/openebs/openebs-pool-network-delay/openebs_pool_network_delay_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: OPENEBS_NAMESPACE + value: 'openebs' + + # only pumba supported + # For pumba image use : gaiaadm/pumba:0.6.5 + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' + + # in milliseconds + - name: NETWORK_DELAY + value: '60000' + + - name: TC_IMAGE + value: 'gaiadocker/iproute2' + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + - name: LIVENESS_APP_LABEL + value: '' + + - name: LIVENESS_APP_NAMESPACE + value: '' + + - name: DATA_PERSISTENCE + value: '' + + labels: + name: openebs-pool-network-delay + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Kill the pool container and check if gets scheduled again +kind: ChaosExperiment +metadata: + labels: + litmuschaos.io/name: openebs + name: openebs-pool-container-failure + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest + name: openebs-pool-container-failure +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "extensions" + - "apps" + - "batch" + - "litmuschaos.io" + - "openebs.io" + - "storage.k8s.io" + resources: + - "replicasets" + - "events" + - "jobs" + - "pods" + - "pods/log" + - "pods/exec" + - "configmaps" + - "secrets" + - 
"persistentvolumeclaims" + - "cstorvolumereplicas" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "get" + - "delete" + - "list" + - "patch" + - "update" + image: "litmuschaos/ansible-runner:latest" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/openebs/openebs-pool-container-failure/openebs_pool_container_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: OPENEBS_NS + value: 'openebs' + + - name: APP_PVC + value: '' + + - name: LIVENESS_APP_LABEL + value: '' + + # only pumba supported + # For pumba image use : gaiaadm/pumba:0.6.5 + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' + + - name: LIVENESS_APP_NAMESPACE + value: '' + + # provide the chaos interval + - name: CHAOS_INTERVAL + value: '10' + + # provide the total chaos duration + - name: TOTAL_CHAOS_DURATION + value: '20' + + - name: DATA_PERSISTENCE + value: '' + + - name: CHAOS_ITERATIONS + value: '2' + + labels: + name: openebs-pool-container-failure + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + #configmaps: + #- name: openebs-pool-container-failure + # mountPath: /mnt + +--- +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Kill the cstor target/Jiva controller pod and check if gets created again +kind: ChaosExperiment +metadata: + labels: + litmuschaos.io/name: openebs + name: openebs-target-pod-failure + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest + name: openebs-target-pod-failure +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "extensions" + - "apps" + - "batch" + - "litmuschaos.io" + - "storage.k8s.io" + resources: + - "deployments" + - "jobs" + - "events" + - "pods" + - "pods/log" + - "pods/exec" + - "configmaps" + - "secrets" + - "services" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + - "persistentvolumeclaims" + - "storageclasses" + - "persistentvolumes" + verbs: + - "create" + - "get" + - "delete" + - "list" + - "patch" + - "update" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + image: "litmuschaos/ansible-runner:latest" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/openebs/openebs-target-pod-failure/openebs_target_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: OPENEBS_NAMESPACE + value: 'openebs' + + - name: APP_PVC + value: '' + + - name: FORCE + value: 'true' + + - name: LIVENESS_APP_LABEL + value: '' + + - name: LIVENESS_APP_NAMESPACE + value: '' + + - name: DATA_PERSISTENCE + value: '' + + - name: TOTAL_CHAOS_DURATION + value: '60' + + # provide the kill count + - name: KILL_COUNT + value: '' + + - name: CHAOS_INTERVAL + value: '15' + + - name: DEPLOY_TYPE + value: 'deployment' + + labels: + name: openebs-target-pod-failure + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + #configmaps: + #- name: openebs-target-pod-failure + # mountPath: /mnt + +--- +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Kill the pool pod and check if gets scheduled again +kind: ChaosExperiment +metadata: + labels: + litmuschaos.io/name: openebs + name: openebs-pool-pod-failure + 
app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest + name: openebs-pool-pod-failure +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "extensions" + - "apps" + - "batch" + - "litmuschaos.io" + - "openebs.io" + - "storage.k8s.io" + resources: + - "deployments" + - "replicasets" + - "jobs" + - "pods/log" + - "events" + - "pods" + - "configmaps" + - "secrets" + - "storageclasses" + - "persistentvolumeclaims" + - "cstorvolumereplicas" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "get" + - "delete" + - "list" + - "patch" + - "update" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + image: "litmuschaos/ansible-runner:latest" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/openebs/openebs-pool-pod-failure/openebs_pool_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: default + + - name: OPENEBS_NS + value: 'openebs' + + - name: APP_PVC + value: '' + + - name: LIVENESS_APP_LABEL + value: '' + + - name: LIVENESS_APP_NAMESPACE + value: '' + + - name: CHAOS_ITERATIONS + value: '2' + + # provide the kill count + - name: KILL_COUNT + value: '' + + - name: DATA_PERSISTENCE + value: '' + + labels: + name: openebs-pool-pod-failure + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + #configmaps: + #- name: openebs-pool-pod-failure + # mountPath: /mnt + + +--- +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Kill all openebs control plane pod and check if gets scheduled again +kind: ChaosExperiment +metadata: + labels: + litmuschaos.io/name: openebs + name: openebs-control-plane-chaos + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest + name: openebs-control-plane-chaos +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "litmuschaos.io" + - "batch" + - "apps" + resources: + - "pods" + - "pods/log" + - "deployments" + - "events" + - "jobs" + - "configmaps" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + + image: "litmuschaos/ansible-runner:latest" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/openebs/openebs-control-plane-chaos/openebs_control_plane_chaos_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: OPENEBS_NAMESPACE + value: 'openebs' + + ## Period to wait before injection of chaos + - name: RAMP_TIME + value: '' + + - name: FORCE + value: '' + + ## env var that describes the library used to execute the chaos + ## default: litmus. 
Supported values: litmus, powerfulseal + - name: LIB + value: '' + + labels: + name: openebs-control-plane-chaos + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Network delay to target pod belonging to a deployment/statefulset/daemonset +kind: ChaosExperiment +metadata: + labels: + litmuschaos.io/name: openebs + name: openebs-target-network-delay + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: latest + name: openebs-target-network-delay +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "extensions" + - "apps" + - "batch" + - "litmuschaos.io" + - "storage.k8s.io" + resources: + - "jobs" + - "pods" + - "services" + - "events" + - "pods/exec" + - "pods/log" + - "configmaps" + - "secrets" + - "persistentvolumeclaims" + - "storageclasses" + - "persistentvolumes" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "get" + - "delete" + - "list" + - "patch" + - "update" + image: "litmuschaos/ansible-runner:latest" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/openebs/openebs-target-network-delay/openebs_target_network_delay_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: OPENEBS_NAMESPACE + value: 'openebs' + + - name: APP_PVC + value: '' + + - name: TC_IMAGE + value: 'gaiadocker/iproute2' + + # only pumba supported + # For pumba image use : gaiaadm/pumba:0.6.5 + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' + + - name: NETWORK_DELAY + value: '60000' # in milliseconds + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + - name: LIVENESS_APP_LABEL + value: '' + + - name: LIVENESS_APP_NAMESPACE + value: '' + + - name: DATA_PERSISTENCE + value: '' + + labels: + name: openebs-target-network-delay + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: latest + #configmaps: + #- name: openebs-target-network-delay + # mountPath: /mnt + --- --- apiVersion: litmuschaos.io/v1alpha1 @@ -495,145 +898,50 @@ spec: - name: cloud-secret mountPath: /tmp/ ---- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Kill the pool pod and check if gets scheduled again + Network loss to pool pod belonging to a OpenEBS PVC kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-pool-pod-failure + name: openebs-pool-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: latest - name: openebs-pool-pod-failure + name: openebs-pool-network-loss spec: definition: scope: Cluster permissions: - apiGroups: - "" - - "extensions" - "apps" - - "batch" - "litmuschaos.io" + - "batch" + - "extensions" + - "storage.k8s.io" - "openebs.io" - - "storage.k8s.io" resources: - - "deployments" - - "replicasets" - - "jobs" - - "pods/log" - - "events" - "pods" - - "configmaps" - - "secrets" - - "storageclasses" - - "persistentvolumeclaims" - - "cstorvolumereplicas" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "get" - - "delete" - - "list" - - "patch" - - "update" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" - image: "litmuschaos/ansible-runner:latest" - imagePullPolicy: Always - args: - - -c - - 
ansible-playbook ./experiments/openebs/openebs-pool-pod-failure/openebs_pool_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 - command: - - /bin/bash - env: - - name: ANSIBLE_STDOUT_CALLBACK - value: default - - - name: OPENEBS_NS - value: 'openebs' - - - name: APP_PVC - value: '' - - - name: LIVENESS_APP_LABEL - value: '' - - - name: LIVENESS_APP_NAMESPACE - value: '' - - - name: CHAOS_ITERATIONS - value: '2' - - # provide the kill count - - name: KILL_COUNT - value: '' - - - name: DATA_PERSISTENCE - value: '' - - labels: - name: openebs-pool-pod-failure - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - #configmaps: - #- name: openebs-pool-pod-failure - # mountPath: /mnt - - ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Network loss to target pod belonging to a OpenEBS PVC -kind: ChaosExperiment -metadata: - labels: - litmuschaos.io/name: openebs - name: openebs-target-network-loss - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest - name: openebs-target-network-loss -spec: - definition: - scope: Cluster - permissions: - - apiGroups: - - "" - - "extensions" - - "apps" - - "batch" - - "litmuschaos.io" - - "storage.k8s.io" - resources: - - "jobs" - - "pods" - - "events" - - "services" - "pods/log" - "pods/exec" + - "events" + - "jobs" - "configmaps" - - "secrets" + - "services" - "persistentvolumeclaims" - "storageclasses" + - "persistentvolumeclaims" - "persistentvolumes" - "chaosengines" - "chaosexperiments" - "chaosresults" + - "cstorpools" + - "cstorvolumereplicas" + - "replicasets" verbs: - "create" - "get" @@ -645,7 +953,7 @@ spec: imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-target-network-loss/openebs_target_network_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-pool-network-loss/openebs_pool_network_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: @@ -655,17 +963,14 @@ spec: - name: OPENEBS_NAMESPACE value: 'openebs' - - name: APP_PVC - value: '' - - - name: TC_IMAGE - value: 'gaiadocker/iproute2' - # only pumba supported # For pumba image use : gaiaadm/pumba:0.6.5 - name: LIB_IMAGE value: 'gaiaadm/pumba:0.6.5' + - name: TC_IMAGE + value: 'gaiadocker/iproute2' + - name: NETWORK_PACKET_LOSS_PERCENTAGE value: '100' # in percentage @@ -682,13 +987,10 @@ spec: value: '' labels: - name: openebs-target-network-loss + name: openebs-pool-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: latest - #configmaps: - #- name: openebs-target-network-loss - # mountPath: /mnt --- --- @@ -793,305 +1095,3 @@ spec: mountPath: /mnt/ --- ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Kill the pool container and check if gets scheduled again -kind: ChaosExperiment -metadata: - labels: - litmuschaos.io/name: openebs - name: openebs-pool-container-failure - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest - name: openebs-pool-container-failure -spec: - definition: - scope: Cluster - permissions: - - apiGroups: - - "" - - "extensions" - - "apps" - - "batch" - - "litmuschaos.io" - - "openebs.io" - - "storage.k8s.io" - resources: - - "replicasets" - - "events" - - "jobs" - - "pods" - - "pods/log" - - "pods/exec" - - "configmaps" - - "secrets" - 
- "persistentvolumeclaims" - - "cstorvolumereplicas" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "get" - - "delete" - - "list" - - "patch" - - "update" - image: "litmuschaos/ansible-runner:latest" - imagePullPolicy: Always - args: - - -c - - ansible-playbook ./experiments/openebs/openebs-pool-container-failure/openebs_pool_container_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 - command: - - /bin/bash - env: - - name: ANSIBLE_STDOUT_CALLBACK - value: 'default' - - - name: OPENEBS_NS - value: 'openebs' - - - name: APP_PVC - value: '' - - - name: LIVENESS_APP_LABEL - value: '' - - # only pumba supported - # For pumba image use : gaiaadm/pumba:0.6.5 - - name: LIB_IMAGE - value: 'gaiaadm/pumba:0.6.5' - - - name: LIVENESS_APP_NAMESPACE - value: '' - - # provide the chaos interval - - name: CHAOS_INTERVAL - value: '10' - - # provide the total chaos duration - - name: TOTAL_CHAOS_DURATION - value: '20' - - - name: DATA_PERSISTENCE - value: '' - - - name: CHAOS_ITERATIONS - value: '2' - - labels: - name: openebs-pool-container-failure - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - #configmaps: - #- name: openebs-pool-container-failure - # mountPath: /mnt - ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Network delay to target pod belonging to a deployment/statefulset/daemonset -kind: ChaosExperiment -metadata: - labels: - litmuschaos.io/name: openebs - name: openebs-target-network-delay - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest - name: openebs-target-network-delay -spec: - definition: - scope: Cluster - permissions: - - apiGroups: - - "" - - "extensions" - - "apps" - - "batch" - - "litmuschaos.io" - - "storage.k8s.io" - resources: - - "jobs" - - "pods" - - "services" - - "events" - - "pods/exec" - - "pods/log" - - "configmaps" - - "secrets" - - "persistentvolumeclaims" - - "storageclasses" - - "persistentvolumes" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "get" - - "delete" - - "list" - - "patch" - - "update" - image: "litmuschaos/ansible-runner:latest" - imagePullPolicy: Always - args: - - -c - - ansible-playbook ./experiments/openebs/openebs-target-network-delay/openebs_target_network_delay_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 - command: - - /bin/bash - env: - - name: ANSIBLE_STDOUT_CALLBACK - value: 'default' - - - name: OPENEBS_NAMESPACE - value: 'openebs' - - - name: APP_PVC - value: '' - - - name: TC_IMAGE - value: 'gaiadocker/iproute2' - - # only pumba supported - # For pumba image use : gaiaadm/pumba:0.6.5 - - name: LIB_IMAGE - value: 'gaiaadm/pumba:0.6.5' - - - name: NETWORK_DELAY - value: '60000' # in milliseconds - - - name: TOTAL_CHAOS_DURATION - value: '60' # in seconds - - - name: LIVENESS_APP_LABEL - value: '' - - - name: LIVENESS_APP_NAMESPACE - value: '' - - - name: DATA_PERSISTENCE - value: '' - - labels: - name: openebs-target-network-delay - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - #configmaps: - #- name: openebs-target-network-delay - # mountPath: /mnt - ---- ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Kill the cstor target/Jiva controller pod and check if gets created again -kind: ChaosExperiment -metadata: - labels: - litmuschaos.io/name: openebs - name: openebs-target-pod-failure - 
app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: latest - name: openebs-target-pod-failure -spec: - definition: - scope: Cluster - permissions: - - apiGroups: - - "" - - "extensions" - - "apps" - - "batch" - - "litmuschaos.io" - - "storage.k8s.io" - resources: - - "deployments" - - "jobs" - - "events" - - "pods" - - "pods/log" - - "pods/exec" - - "configmaps" - - "secrets" - - "services" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - - "persistentvolumeclaims" - - "storageclasses" - - "persistentvolumes" - verbs: - - "create" - - "get" - - "delete" - - "list" - - "patch" - - "update" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" - image: "litmuschaos/ansible-runner:latest" - imagePullPolicy: Always - args: - - -c - - ansible-playbook ./experiments/openebs/openebs-target-pod-failure/openebs_target_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 - command: - - /bin/bash - env: - - name: ANSIBLE_STDOUT_CALLBACK - value: 'default' - - - name: OPENEBS_NAMESPACE - value: 'openebs' - - - name: APP_PVC - value: '' - - - name: FORCE - value: 'true' - - - name: LIVENESS_APP_LABEL - value: '' - - - name: LIVENESS_APP_NAMESPACE - value: '' - - - name: DATA_PERSISTENCE - value: '' - - - name: TOTAL_CHAOS_DURATION - value: '60' - - # provide the kill count - - name: KILL_COUNT - value: '' - - - name: CHAOS_INTERVAL - value: '15' - - - name: DEPLOY_TYPE - value: 'deployment' - - labels: - name: openebs-target-pod-failure - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest - #configmaps: - #- name: openebs-target-pod-failure - # mountPath: /mnt - ----
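
For reference, the ChaosExperiment CRs patched above are not run directly; a ChaosEngine resource selects an experiment by name and supplies per-run overrides for its env vars. Below is a minimal sketch for the node-memory-hog experiment defined in this chart; the namespace (default), the chaosServiceAccount name (litmus-admin), and the override values shown are illustrative assumptions, not values taken from the chart.

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: node-memory-hog-engine        # illustrative name
  namespace: default                  # assumption: namespace where chaos is orchestrated
spec:
  engineState: 'active'
  chaosServiceAccount: litmus-admin   # assumption: a service account bound to the permissions listed in the experiment
  experiments:
    - name: node-memory-hog
      spec:
        components:
          env:
            # consume ~30% of each targeted node's memory capacity
            # (leave MEMORY_CONSUMPTION_MEBIBYTES empty when using the percentage option)
            - name: MEMORY_CONSUMPTION_PERCENTAGE
              value: '30'
            # run the hog for 120 seconds (the experiment's default duration)
            - name: TOTAL_CHAOS_DURATION
              value: '120'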