1123421505: version upgraded for chaos-charts

litmusbot
2021-08-12 09:51:21 +00:00
parent be89e34763
commit 07253c6d84
6 changed files with 2757 additions and 2749 deletions


@@ -82,7 +82,7 @@ spec:
  value: '15'
  # SET THE CASSANDRA_LIVENESS_CHECK
- # IT CAN BE `enabled` OR `disabled`
+ # IT CAN BE `enable` OR `disable`
  - name: CASSANDRA_LIVENESS_CHECK
  value: ''
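
The hunk above only rewords the documented values for CASSANDRA_LIVENESS_CHECK from `enabled`/`disabled` to `enable`/`disable`, presumably tracking what the experiment actually accepts. A user overriding the variable after this commit would use the new spelling; a minimal sketch (the surrounding ChaosEngine fields are assumed, not part of this diff):

            - name: CASSANDRA_LIVENESS_CHECK
              value: 'enable'   # new documented spelling; 'disable' turns the check off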


@@ -1,99 +1,4 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration of time
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "batch"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "pods/log"
- "events"
- "secrets"
- "pods/exec"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
- "deletecollection"
image: "litmuschaos/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: LIB
value: 'litmus'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: 'parallel'
# set the GCP project id
- name: GCP_PROJECT_ID
value: ''
# set the disk volume name(s) as comma separated values
# eg. volume1,volume2,...
- name: DISK_VOLUME_NAMES
value: ''
# set the disk zone(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAME
# eg. zone1,zone2,...
- name: DISK_ZONES
value: ''
# set the device name(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAME
# eg. device1,device2,...
- name: DEVICE_NAMES
value: ''
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops GCP VM instances and GKE nodes for a specified duration of time and later restarts them
@@ -189,3 +94,98 @@ spec:
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration of time
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "batch"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "pods/log"
- "events"
- "secrets"
- "pods/exec"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
- "deletecollection"
image: "litmuschaos/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: LIB
value: 'litmus'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: 'parallel'
# set the GCP project id
- name: GCP_PROJECT_ID
value: ''
# set the disk volume name(s) as comma separated values
# eg. volume1,volume2,...
- name: DISK_VOLUME_NAMES
value: ''
# set the disk zone(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAME
# eg. zone1,zone2,...
- name: DISK_ZONES
value: ''
# set the device name(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAME
# eg. device1,device2,...
- name: DEVICE_NAMES
value: ''
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
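
Both hunks in this file move the gcp-vm-disk-loss definition to the end of the file; its env contract is unchanged. For reference, a ChaosEngine would populate those variables roughly as below — the engine name, namespace, service account, and all values are placeholders, not taken from this commit:

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: gcp-disk-chaos                       # placeholder
  namespace: default
spec:
  engineState: 'active'
  chaosServiceAccount: gcp-vm-disk-loss-sa   # placeholder; needs the permissions listed above
  experiments:
    - name: gcp-vm-disk-loss
      spec:
        components:
          env:
            - name: GCP_PROJECT_ID
              value: 'my-project'            # placeholder project id
            # the three lists below are comma separated and order-aligned
            - name: DISK_VOLUME_NAMES
              value: 'volume1,volume2'
            - name: DISK_ZONES
              value: 'zone1,zone2'
            - name: DEVICE_NAMES
              value: 'device1,device2'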

File diff suppressed because it is too large


@@ -1,124 +1,4 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching a persistent disk from a node/instance for kafka.
kind: ChaosExperiment
metadata:
name: kafka-broker-disk-failure
labels:
name: kafka-broker-disk-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "litmuschaos.io"
resources:
- "statefulsets"
- "secrets"
- "jobs"
- "pods/log"
- "events"
- "pods"
- "pods/exec"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "delete"
- "get"
- "list"
- "patch"
image: "litmuschaos/ansible-runner:latest"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/kafka/kafka-broker-disk-failure/kafka-broker-disk-failure-ansible-logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: KAFKA_KIND
value: 'statefulset'
- name: KAFKA_LIVENESS_STREAM
value: 'enabled'
- name: KAFKA_LIVENESS_IMAGE
value: 'litmuschaos/kafka-client:ci'
- name: KAFKA_CONSUMER_TIMEOUT
value: '30000'
- name: TOTAL_CHAOS_DURATION
value: '15'
- name: PROJECT_ID
value: ''
- name: DISK_NAME
value: ''
- name: ZONE_NAME
value: ''
# GKE and AWS supported
- name: CLOUD_PLATFORM
value: 'GKE'
- name: KAFKA_NAMESPACE
value: ''
- name: KAFKA_LABEL
value: ''
- name: KAFKA_BROKER
value: ''
- name: KAFKA_INSTANCE_NAME
value: ''
- name: KAFKA_REPLICATION_FACTOR
value: ''
- name: KAFKA_SERVICE
value: ''
- name: KAFKA_PORT
value: ''
- name: ZOOKEEPER_NAMESPACE
value: ''
- name: ZOOKEEPER_LABEL
value: ''
- name: ZOOKEEPER_SERVICE
value: ''
- name: ZOOKEEPER_PORT
value: ''
labels:
name: kafka-broker-disk-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: kafka-broker-disk-failure
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deleting a kafka broker pod
@@ -247,3 +127,123 @@ spec:
app.kubernetes.io/version: latest
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching a persistent disk from a node/instance for kafka.
kind: ChaosExperiment
metadata:
name: kafka-broker-disk-failure
labels:
name: kafka-broker-disk-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "litmuschaos.io"
resources:
- "statefulsets"
- "secrets"
- "jobs"
- "pods/log"
- "events"
- "pods"
- "pods/exec"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "delete"
- "get"
- "list"
- "patch"
image: "litmuschaos/ansible-runner:latest"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/kafka/kafka-broker-disk-failure/kafka-broker-disk-failure-ansible-logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: KAFKA_KIND
value: 'statefulset'
- name: KAFKA_LIVENESS_STREAM
value: 'enabled'
- name: KAFKA_LIVENESS_IMAGE
value: 'litmuschaos/kafka-client:ci'
- name: KAFKA_CONSUMER_TIMEOUT
value: '30000'
- name: TOTAL_CHAOS_DURATION
value: '15'
- name: PROJECT_ID
value: ''
- name: DISK_NAME
value: ''
- name: ZONE_NAME
value: ''
# GKE and AWS supported
- name: CLOUD_PLATFORM
value: 'GKE'
- name: KAFKA_NAMESPACE
value: ''
- name: KAFKA_LABEL
value: ''
- name: KAFKA_BROKER
value: ''
- name: KAFKA_INSTANCE_NAME
value: ''
- name: KAFKA_REPLICATION_FACTOR
value: ''
- name: KAFKA_SERVICE
value: ''
- name: KAFKA_PORT
value: ''
- name: ZOOKEEPER_NAMESPACE
value: ''
- name: ZOOKEEPER_LABEL
value: ''
- name: ZOOKEEPER_SERVICE
value: ''
- name: ZOOKEEPER_PORT
value: ''
labels:
name: kafka-broker-disk-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: kafka-broker-disk-failure
mountPath: /tmp/
---
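
As with the GCP chart, the kafka-broker-disk-failure definition is moved, not changed. A usage sketch with the disk-related variables filled in (all names and values below are illustrative placeholders):

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: kafka-broker-disk-chaos       # placeholder
  namespace: default
spec:
  engineState: 'active'
  chaosServiceAccount: kafka-chaos-sa # placeholder
  experiments:
    - name: kafka-broker-disk-failure
      spec:
        components:
          env:
            - name: CLOUD_PLATFORM
              value: 'GKE'            # GKE and AWS supported, per the manifest
            - name: PROJECT_ID
              value: 'my-project'     # placeholder
            - name: DISK_NAME
              value: 'broker-disk-1'  # placeholder
            - name: ZONE_NAME
              value: 'us-central1-a'  # placeholder
            - name: KAFKA_NAMESPACE
              value: 'kafka'          # placeholder
            - name: KAFKA_LABEL
              value: 'app=kafka'      # placeholder label selector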


@@ -3,180 +3,6 @@ description:
message: |
Detaching an ebs volume from ec2 instance.
kind: ChaosExperiment
metadata:
name: ec2-terminate-by-id
labels:
name: ec2-terminate-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "batch"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "events"
- "pods/log"
- "pods/exec"
- "secrets"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-terminate-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# enable it if the target instance is a part of self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
# Instance ID of the target ec2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
# provide the LIB
# only litmus supported
- name: LIB
value: 'litmus'
labels:
name: ec2-terminate-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an ebs volume from ec2 instance.
kind: ChaosExperiment
metadata:
name: ebs-loss-by-tag
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "batch"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "events"
- "pods/log"
- "pods/exec"
- "secrets"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ebs-loss-by-tag
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: RAMP_TIME
value: ''
- name: EBS_VOLUME_TAG
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
- name: VOLUME_AFFECTED_PERC
value: ''
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
# provide the LIB
# only litmus supported
- name: LIB
value: 'litmus'
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an ebs volume from ec2 instance.
kind: ChaosExperiment
metadata:
name: ebs-loss-by-id
labels:
@@ -346,3 +172,177 @@ spec:
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an ebs volume from ec2 instance.
kind: ChaosExperiment
metadata:
name: ebs-loss-by-tag
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "batch"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "events"
- "pods/log"
- "pods/exec"
- "secrets"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ebs-loss-by-tag
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: RAMP_TIME
value: ''
- name: EBS_VOLUME_TAG
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
- name: VOLUME_AFFECTED_PERC
value: ''
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
# provide the LIB
# only litmus supported
- name: LIB
value: 'litmus'
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an ebs volume from ec2 instance.
kind: ChaosExperiment
metadata:
name: ec2-terminate-by-id
labels:
name: ec2-terminate-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "batch"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "events"
- "pods/log"
- "pods/exec"
- "secrets"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-terminate-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# enable it if the target instance is a part of self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
# Instance ID of the target ec2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
# provide the LIB
# only litmus supported
- name: LIB
value: 'litmus'
labels:
name: ec2-terminate-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
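
All three AWS experiments in this file mount a cloud-secret Secret at /tmp/ and point AWS_SHARED_CREDENTIALS_FILE at /tmp/cloud_config.yml. Assuming the standard AWS shared-credentials file format, that Secret would look roughly like this (the key material is a placeholder):

apiVersion: v1
kind: Secret
metadata:
  name: cloud-secret        # the name referenced by the secrets: stanza above
type: Opaque
stringData:
  cloud_config.yml: |-
    [default]
    aws_access_key_id = <access-key-id>
    aws_secret_access_key = <secret-access-key>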


@@ -96,16 +96,16 @@ spec:
  apiVersion: litmuschaos.io/v1alpha1
  description:
  message: |
- Kill the cstor target/Jiva controller pod and check if gets created again
+ Kill the pool pod and check if gets scheduled again
  kind: ChaosExperiment
  metadata:
  labels:
  litmuschaos.io/name: openebs
- name: openebs-target-pod-failure
+ name: openebs-pool-pod-failure
  app.kubernetes.io/part-of: litmus
  app.kubernetes.io/component: chaosexperiment
  app.kubernetes.io/version: latest
- name: openebs-target-pod-failure
+ name: openebs-pool-pod-failure
  spec:
  definition:
  scope: Cluster
@@ -116,23 +116,23 @@ spec:
- "apps" - "apps"
- "batch" - "batch"
- "litmuschaos.io" - "litmuschaos.io"
- "openebs.io"
- "storage.k8s.io" - "storage.k8s.io"
resources: resources:
- "deployments" - "deployments"
- "replicasets"
- "jobs" - "jobs"
- "pods/log"
- "events" - "events"
- "pods" - "pods"
- "pods/log"
- "pods/exec"
- "configmaps" - "configmaps"
- "secrets" - "secrets"
- "services" - "storageclasses"
- "persistentvolumeclaims"
- "cstorvolumereplicas"
- "chaosengines" - "chaosengines"
- "chaosexperiments" - "chaosexperiments"
- "chaosresults" - "chaosresults"
- "persistentvolumeclaims"
- "storageclasses"
- "persistentvolumes"
verbs: verbs:
- "create" - "create"
- "get" - "get"
@@ -151,154 +151,44 @@ spec:
  imagePullPolicy: Always
  args:
  - -c
- - ansible-playbook ./experiments/openebs/openebs-target-pod-failure/openebs_target_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
+ - ansible-playbook ./experiments/openebs/openebs-pool-pod-failure/openebs_pool_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
  command:
  - /bin/bash
  env:
  - name: ANSIBLE_STDOUT_CALLBACK
- value: 'default'
+ value: default
- - name: OPENEBS_NAMESPACE
+ - name: OPENEBS_NS
  value: 'openebs'
  - name: APP_PVC
  value: ''
- - name: FORCE
- value: 'true'
  - name: LIVENESS_APP_LABEL
  value: ''
  - name: LIVENESS_APP_NAMESPACE
  value: ''
- - name: DATA_PERSISTENCE
- value: ''
+ - name: CHAOS_ITERATIONS
+ value: '2'
- - name: TOTAL_CHAOS_DURATION
- value: '60'
  # provide the kill count
  - name: KILL_COUNT
  value: ''
- - name: CHAOS_INTERVAL
- value: '15'
+ - name: DATA_PERSISTENCE
+ value: ''
- - name: DEPLOY_TYPE
- value: 'deployment'
  labels:
- name: openebs-target-pod-failure
+ name: openebs-pool-pod-failure
  app.kubernetes.io/part-of: litmus
  app.kubernetes.io/component: experiment-job
  app.kubernetes.io/version: latest
  #configmaps:
- #- name: openebs-target-pod-failure
+ #- name: openebs-pool-pod-failure
  # mountPath: /mnt
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Kill the OpenEBS NFS provisioner container and check if pods consuming the NFS PVs continue to be available and volumes are writable (RWM mode)
kind: ChaosExperiment
metadata:
labels:
litmuschaos.io/name: openebs
name: openebs-nfs-provisioner-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
name: openebs-nfs-provisioner-kill
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "apps"
- "litmuschaos.io"
- "batch"
- "extensions"
- "storage.k8s.io"
resources:
- "pods"
- "pods/exec"
- "pods/log"
- "deployments"
- "events"
- "jobs"
- "configmaps"
- "services"
- "persistentvolumeclaims"
- "storageclasses"
- "persistentvolumes"
- "chaosexperiments"
- "chaosresults"
- "chaosengines"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
- apiGroups:
- ""
resources:
- "nodes"
verbs:
- "get"
- "list"
image: "litmuschaos/ansible-runner:latest"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/openebs/openebs-nfs-provisioner-kill/openebs_nfs_provisioner_kill_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
# NFS default container
- name: TARGET_CONTAINER
value: 'nfs-provisioner'
# Period to wait before injection of chaos in sec
- name: RAMP_TIME
value: ''
# It supports pumba and containerd
- name: LIB
value: 'pumba'
# LIB_IMAGE can be - gaiaadm/pumba:0.6.5, gprasath/crictl:ci
# For pumba image use: gaiaadm/pumba:0.6.5
# For containerd image use: gprasath/crictl:ci
- name: LIB_IMAGE
value: 'gaiaadm/pumba:0.6.5'
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
labels:
name: openebs-nfs-provisioner-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
configmaps:
- name: openebs-nfs-provisioner-kill
mountPath: /mnt/
---
apiVersion: litmuschaos.io/v1alpha1
@@ -496,52 +386,55 @@ spec:
  apiVersion: litmuschaos.io/v1alpha1
  description:
  message: |
- Kill the cstor target/Jiva controller container and check if gets created again
+ Kill all openebs control plane pod and check if gets scheduled again
  kind: ChaosExperiment
  metadata:
  labels:
  litmuschaos.io/name: openebs
- name: openebs-target-container-failure
+ name: openebs-control-plane-chaos
  app.kubernetes.io/part-of: litmus
  app.kubernetes.io/component: chaosexperiment
  app.kubernetes.io/version: latest
- name: openebs-target-container-failure
+ name: openebs-control-plane-chaos
  spec:
  definition:
- scope: Cluster
+ scope: Namespaced
  permissions:
  - apiGroups:
  - ""
- - "apps"
- - "batch"
  - "litmuschaos.io"
- - "storage.k8s.io"
+ - "batch"
+ - "apps"
  resources:
- - "jobs"
  - "pods"
- - "events"
- - "pods/exec"
  - "pods/log"
+ - "deployments"
+ - "events"
+ - "jobs"
  - "configmaps"
- - "secrets"
- - "persistentvolumeclaims"
- - "storageclasses"
- - "persistentvolumes"
  - "chaosengines"
  - "chaosexperiments"
  - "chaosresults"
  verbs:
  - "create"
- - "delete"
- - "get"
  - "list"
+ - "get"
  - "patch"
  - "update"
+ - "delete"
+ - apiGroups:
+ - ""
+ resources:
+ - "nodes"
+ verbs:
+ - "get"
+ - "list"
  image: "litmuschaos/ansible-runner:latest"
  imagePullPolicy: Always
  args:
  - -c
- - ansible-playbook ./experiments/openebs/openebs-target-container-failure/openebs_target_container_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
+ - ansible-playbook ./experiments/openebs/openebs-control-plane-chaos/openebs_control_plane_chaos_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
  command:
  - /bin/bash
  env:
@@ -551,258 +444,23 @@ spec:
  - name: OPENEBS_NAMESPACE
  value: 'openebs'
- - name: APP_PVC
+ ## Period to wait before injection of chaos
+ - name: RAMP_TIME
  value: ''
- # LIB_IMAGE can be - gaiaadm/pumba:0.6.5, gprasath/crictl:ci
- # For pumba image use : gaiaadm/pumba:0.6.5
- # For containerd image use : gprasath/crictl:ci
- - name: LIB_IMAGE
- value: 'gaiaadm/pumba:0.6.5'
- # Specify the container runtime used , to pick the relevant chaos util
- - name: CONTAINER_RUNTIME
- value: 'docker'
- # TARGET_CONTAINER values: cstor-volume-mgmt , cstor-istgt
- # For cstor-volume-istgt container kill use : cstor-istgt
- # For volume-mgmt-kill container use : cstor-volume-mgmt
- - name: TARGET_CONTAINER
- value: 'cstor-volume-mgmt'
  - name: FORCE
- value: 'true'
- - name: LIVENESS_APP_LABEL
  value: ''
- - name: LIVENESS_APP_NAMESPACE
- value: ''
+ ## env var that describes the library used to execute the chaos
+ ## default: litmus. Supported values: litmus, powerfulseal
+ - name: LIB
+ value: 'litmus'
- - name: DATA_PERSISTENCE
- value: ''
- - name: DEPLOY_TYPE
- value: 'deployment'
- # provide the chaos interval
- - name: CHAOS_INTERVAL
- value: '10'
- # provide the total chaos duration
- - name: TOTAL_CHAOS_DURATION
- value: '20'
- - name: SOAK_TIME
- value: '60'
  labels:
- name: openebs-target-container-failure
+ name: openebs-control-plane-chaos
  app.kubernetes.io/part-of: litmus
  app.kubernetes.io/component: experiment-job
  app.kubernetes.io/version: latest
- #configmaps:
- #- name: openebs-target-container-failure
- # mountPath: /mnt
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Kill the pool container and check if gets scheduled again
kind: ChaosExperiment
metadata:
labels:
litmuschaos.io/name: openebs
name: openebs-pool-container-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
name: openebs-pool-container-failure
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "extensions"
- "apps"
- "batch"
- "litmuschaos.io"
- "openebs.io"
- "storage.k8s.io"
resources:
- "replicasets"
- "events"
- "jobs"
- "pods"
- "pods/log"
- "pods/exec"
- "configmaps"
- "secrets"
- "persistentvolumeclaims"
- "cstorvolumereplicas"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "get"
- "delete"
- "list"
- "patch"
- "update"
image: "litmuschaos/ansible-runner:latest"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/openebs/openebs-pool-container-failure/openebs_pool_container_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: OPENEBS_NS
value: 'openebs'
- name: APP_PVC
value: ''
- name: LIVENESS_APP_LABEL
value: ''
# only pumba supported
# For pumba image use : gaiaadm/pumba:0.6.5
- name: LIB_IMAGE
value: 'gaiaadm/pumba:0.6.5'
- name: LIVENESS_APP_NAMESPACE
value: ''
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
- name: DATA_PERSISTENCE
value: ''
- name: CHAOS_ITERATIONS
value: '2'
labels:
name: openebs-pool-container-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
#configmaps:
#- name: openebs-pool-container-failure
# mountPath: /mnt
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Kill the pool pod and check if gets scheduled again
kind: ChaosExperiment
metadata:
labels:
litmuschaos.io/name: openebs
name: openebs-pool-pod-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
name: openebs-pool-pod-failure
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "extensions"
- "apps"
- "batch"
- "litmuschaos.io"
- "openebs.io"
- "storage.k8s.io"
resources:
- "deployments"
- "replicasets"
- "jobs"
- "pods/log"
- "events"
- "pods"
- "configmaps"
- "secrets"
- "storageclasses"
- "persistentvolumeclaims"
- "cstorvolumereplicas"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "get"
- "delete"
- "list"
- "patch"
- "update"
- apiGroups:
- ""
resources:
- "nodes"
verbs:
- "get"
- "list"
image: "litmuschaos/ansible-runner:latest"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/openebs/openebs-pool-pod-failure/openebs_pool_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: default
- name: OPENEBS_NS
value: 'openebs'
- name: APP_PVC
value: ''
- name: LIVENESS_APP_LABEL
value: ''
- name: LIVENESS_APP_NAMESPACE
value: ''
- name: CHAOS_ITERATIONS
value: '2'
# provide the kill count
- name: KILL_COUNT
value: ''
- name: DATA_PERSISTENCE
value: ''
labels:
name: openebs-pool-pod-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
#configmaps:
#- name: openebs-pool-pod-failure
# mountPath: /mnt
---
---
@@ -917,6 +575,212 @@ spec:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Kill the cstor target/Jiva controller pod and check if gets created again
kind: ChaosExperiment
metadata:
labels:
litmuschaos.io/name: openebs
name: openebs-target-pod-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
name: openebs-target-pod-failure
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "extensions"
- "apps"
- "batch"
- "litmuschaos.io"
- "storage.k8s.io"
resources:
- "deployments"
- "jobs"
- "events"
- "pods"
- "pods/log"
- "pods/exec"
- "configmaps"
- "secrets"
- "services"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
- "persistentvolumeclaims"
- "storageclasses"
- "persistentvolumes"
verbs:
- "create"
- "get"
- "delete"
- "list"
- "patch"
- "update"
- apiGroups:
- ""
resources:
- "nodes"
verbs:
- "get"
- "list"
image: "litmuschaos/ansible-runner:latest"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/openebs/openebs-target-pod-failure/openebs_target_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: OPENEBS_NAMESPACE
value: 'openebs'
- name: APP_PVC
value: ''
- name: FORCE
value: 'true'
- name: LIVENESS_APP_LABEL
value: ''
- name: LIVENESS_APP_NAMESPACE
value: ''
- name: DATA_PERSISTENCE
value: ''
- name: TOTAL_CHAOS_DURATION
value: '60'
# provide the kill count
- name: KILL_COUNT
value: ''
- name: CHAOS_INTERVAL
value: '15'
- name: DEPLOY_TYPE
value: 'deployment'
labels:
name: openebs-target-pod-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
#configmaps:
#- name: openebs-target-pod-failure
# mountPath: /mnt
---
---
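
The openebs-target-pod-failure definition re-added above keeps its env contract. A minimal ChaosEngine sketch for it — the engine name, namespace, service account, and PVC are placeholders:

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: target-pod-chaos                  # placeholder
  namespace: default
spec:
  engineState: 'active'
  chaosServiceAccount: openebs-chaos-sa   # placeholder
  experiments:
    - name: openebs-target-pod-failure
      spec:
        components:
          env:
            - name: APP_PVC
              value: 'demo-pvc'           # placeholder; a PVC backed by an OpenEBS volume
            - name: TOTAL_CHAOS_DURATION
              value: '60'
            - name: CHAOS_INTERVAL
              value: '15'
            - name: DATA_PERSISTENCE
              value: ''                   # empty skips the data-consistency checks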
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Kill the pool container and check if gets scheduled again
kind: ChaosExperiment
metadata:
labels:
litmuschaos.io/name: openebs
name: openebs-pool-container-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
name: openebs-pool-container-failure
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "extensions"
- "apps"
- "batch"
- "litmuschaos.io"
- "openebs.io"
- "storage.k8s.io"
resources:
- "replicasets"
- "events"
- "jobs"
- "pods"
- "pods/log"
- "pods/exec"
- "configmaps"
- "secrets"
- "persistentvolumeclaims"
- "cstorvolumereplicas"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "get"
- "delete"
- "list"
- "patch"
- "update"
image: "litmuschaos/ansible-runner:latest"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/openebs/openebs-pool-container-failure/openebs_pool_container_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
- name: OPENEBS_NS
value: 'openebs'
- name: APP_PVC
value: ''
- name: LIVENESS_APP_LABEL
value: ''
# only pumba supported
# For pumba image use : gaiaadm/pumba:0.6.5
- name: LIB_IMAGE
value: 'gaiaadm/pumba:0.6.5'
- name: LIVENESS_APP_NAMESPACE
value: ''
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
- name: DATA_PERSISTENCE
value: ''
- name: CHAOS_ITERATIONS
value: '2'
labels:
name: openebs-pool-container-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
#configmaps:
#- name: openebs-pool-container-failure
# mountPath: /mnt
---
apiVersion: litmuschaos.io/v1alpha1
description:
@@ -1018,35 +882,42 @@ spec:
  apiVersion: litmuschaos.io/v1alpha1
  description:
  message: |
- Kill all openebs control plane pod and check if gets scheduled again
+ Kill the OpenEBS NFS provisioner container and check if pods consuming the NFS PVs continue to be available and volumes are writable (RWM mode)
  kind: ChaosExperiment
  metadata:
  labels:
  litmuschaos.io/name: openebs
- name: openebs-control-plane-chaos
+ name: openebs-nfs-provisioner-kill
  app.kubernetes.io/part-of: litmus
  app.kubernetes.io/component: chaosexperiment
  app.kubernetes.io/version: latest
- name: openebs-control-plane-chaos
+ name: openebs-nfs-provisioner-kill
  spec:
  definition:
- scope: Namespaced
+ scope: Cluster
  permissions:
  - apiGroups:
  - ""
+ - "apps"
  - "litmuschaos.io"
  - "batch"
- - "apps"
+ - "extensions"
+ - "storage.k8s.io"
  resources:
  - "pods"
+ - "pods/exec"
  - "pods/log"
  - "deployments"
  - "events"
  - "jobs"
  - "configmaps"
- - "chaosengines"
+ - "services"
+ - "persistentvolumeclaims"
+ - "storageclasses"
+ - "persistentvolumes"
  - "chaosexperiments"
  - "chaosresults"
+ - "chaosengines"
  verbs:
  - "create"
  - "list"
@@ -1066,7 +937,99 @@ spec:
  imagePullPolicy: Always
  args:
  - -c
- - ansible-playbook ./experiments/openebs/openebs-control-plane-chaos/openebs_control_plane_chaos_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
+ - ansible-playbook ./experiments/openebs/openebs-nfs-provisioner-kill/openebs_nfs_provisioner_kill_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
- name: ANSIBLE_STDOUT_CALLBACK
value: 'default'
# NFS default container
- name: TARGET_CONTAINER
value: 'nfs-provisioner'
# Period to wait before injection of chaos in sec
- name: RAMP_TIME
value: ''
# It supports pumba and containerd
- name: LIB
value: 'pumba'
# LIB_IMAGE can be - gaiaadm/pumba:0.6.5, gprasath/crictl:ci
# For pumba image use: gaiaadm/pumba:0.6.5
# For containerd image use: gprasath/crictl:ci
- name: LIB_IMAGE
value: 'gaiaadm/pumba:0.6.5'
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
labels:
name: openebs-nfs-provisioner-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
configmaps:
- name: openebs-nfs-provisioner-kill
mountPath: /mnt/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Kill the cstor target/Jiva controller container and check if gets created again
kind: ChaosExperiment
metadata:
labels:
litmuschaos.io/name: openebs
name: openebs-target-container-failure
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
name: openebs-target-container-failure
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "apps"
- "batch"
- "litmuschaos.io"
- "storage.k8s.io"
resources:
- "jobs"
- "pods"
- "events"
- "pods/exec"
- "pods/log"
- "configmaps"
- "secrets"
- "persistentvolumeclaims"
- "storageclasses"
- "persistentvolumes"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "delete"
- "get"
- "list"
- "patch"
- "update"
image: "litmuschaos/ansible-runner:latest"
imagePullPolicy: Always
args:
- -c
- ansible-playbook ./experiments/openebs/openebs-target-container-failure/openebs_target_container_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0
command:
- /bin/bash
env:
@@ -1076,22 +1039,59 @@ spec:
  - name: OPENEBS_NAMESPACE
  value: 'openebs'
- ## Period to wait before injection of chaos
- - name: RAMP_TIME
+ - name: APP_PVC
  value: ''
+ # LIB_IMAGE can be - gaiaadm/pumba:0.6.5, gprasath/crictl:ci
+ # For pumba image use : gaiaadm/pumba:0.6.5
+ # For containerd image use : gprasath/crictl:ci
+ - name: LIB_IMAGE
+ value: 'gaiaadm/pumba:0.6.5'
+ # Specify the container runtime used , to pick the relevant chaos util
+ - name: CONTAINER_RUNTIME
+ value: 'docker'
+ # TARGET_CONTAINER values: cstor-volume-mgmt , cstor-istgt
+ # For cstor-volume-istgt container kill use : cstor-istgt
+ # For volume-mgmt-kill container use : cstor-volume-mgmt
+ - name: TARGET_CONTAINER
+ value: 'cstor-volume-mgmt'
  - name: FORCE
+ value: 'true'
+ - name: LIVENESS_APP_LABEL
  value: ''
- ## env var that describes the library used to execute the chaos
- ## default: litmus. Supported values: litmus, powerfulseal
+ - name: LIVENESS_APP_NAMESPACE
+ value: ''
- - name: LIB
- value: 'litmus'
+ - name: DATA_PERSISTENCE
+ value: ''
+ - name: DEPLOY_TYPE
+ value: 'deployment'
+ # provide the chaos interval
+ - name: CHAOS_INTERVAL
+ value: '10'
+ # provide the total chaos duration
+ - name: TOTAL_CHAOS_DURATION
+ value: '20'
+ - name: SOAK_TIME
+ value: '60'
  labels:
- name: openebs-control-plane-chaos
+ name: openebs-target-container-failure
  app.kubernetes.io/part-of: litmus
  app.kubernetes.io/component: experiment-job
  app.kubernetes.io/version: latest
+ #configmaps:
+ #- name: openebs-target-container-failure
+ # mountPath: /mnt
  ---
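
The openebs-control-plane-chaos definition that moves in this file is declared with scope: Namespaced, so it can run against a single namespace. A minimal engine sketch for it (engine name and service account are placeholders):

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: control-plane-chaos               # placeholder
  namespace: openebs
spec:
  engineState: 'active'
  chaosServiceAccount: control-plane-sa   # placeholder
  experiments:
    - name: openebs-control-plane-chaos
      spec:
        components:
          env:
            - name: OPENEBS_NAMESPACE
              value: 'openebs'
            - name: FORCE
              value: ''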