From 6ebfd3057ec8d1541b9d142fc1e9d35f6933e353 Mon Sep 17 00:00:00 2001 From: UDIT GAURAV <35391335+uditgaurav@users.noreply.github.com> Date: Tue, 21 Jul 2020 13:16:28 +0530 Subject: [PATCH] charts(ansible): Add ansible chaos charts for running ansible experiments (#272) Signed-off-by: Udit Gaurav Co-authored-by: Karthik Satchitanand --- .../container-kill/ansible/engine.yaml | 37 +++++++++ .../container-kill/ansible/experiment.yaml | 76 ++++++++++++++++++ .../generic/container-kill/ansible/rbac.yaml | 37 +++++++++ charts/generic/disk-fill/ansible/engine.yaml | 32 ++++++++ .../generic/disk-fill/ansible/experiment.yaml | 70 +++++++++++++++++ charts/generic/disk-fill/ansible/rbac.yaml | 34 ++++++++ .../kubelet-service-kill/ansible/engine.yaml | 27 +++++++ .../ansible/experiment.yaml | 61 +++++++++++++++ .../kubelet-service-kill/ansible/rbac.yaml | 37 +++++++++ .../generic/node-cpu-hog/ansible/engine.yaml | 31 ++++++++ .../node-cpu-hog/ansible/experiment.yaml | 74 ++++++++++++++++++ charts/generic/node-cpu-hog/ansible/rbac.yaml | 37 +++++++++ charts/generic/node-cpu-hog/engine.yaml | 2 +- .../node-cpu-hog.chartserviceversion.yaml | 1 - charts/generic/node-drain/ansible/engine.yaml | 28 +++++++ .../node-drain/ansible/experiment.yaml | 77 +++++++++++++++++++ charts/generic/node-drain/ansible/rbac.yaml | 37 +++++++++ .../node-memory-hog/ansible/engine.yaml | 33 ++++++++ .../node-memory-hog/ansible/experiment.yaml | 74 ++++++++++++++++++ .../generic/node-memory-hog/ansible/rbac.yaml | 37 +++++++++ .../generic/pod-cpu-hog/ansible/engine.yaml | 38 +++++++++ .../pod-cpu-hog/ansible/experiment.yaml | 62 +++++++++++++++ charts/generic/pod-cpu-hog/ansible/rbac.yaml | 36 +++++++++ charts/generic/pod-delete/ansible/engine.yaml | 36 +++++++++ .../pod-delete/ansible/experiment.yaml | 77 +++++++++++++++++++ .../pod-delete/ansible/powerfulseal_rbac.yaml | 38 +++++++++ charts/generic/pod-delete/ansible/rbac.yaml | 37 +++++++++ .../pod-memory-hog/ansible/engine.yaml | 38 +++++++++ 
.../pod-memory-hog/ansible/experiment.yaml | 66 ++++++++++++++++ .../generic/pod-memory-hog/ansible/rbac.yaml | 36 +++++++++ .../ansible/engine.yaml | 36 +++++++++ .../ansible/experiment.yaml | 66 ++++++++++++++++ .../pod-network-corruption/ansible/rbac.yaml | 36 +++++++++ .../pod-network-latency/ansible/engine.yaml | 42 ++++++++++ .../ansible/experiment.yaml | 66 ++++++++++++++++ .../pod-network-latency/ansible/rbac.yaml | 36 +++++++++ .../pod-network-loss/ansible/engine.yaml | 44 +++++++++++ .../pod-network-loss/ansible/experiment.yaml | 66 ++++++++++++++++ .../pod-network-loss/ansible/rbac.yaml | 35 +++++++++ 39 files changed, 1731 insertions(+), 2 deletions(-) create mode 100644 charts/generic/container-kill/ansible/engine.yaml create mode 100644 charts/generic/container-kill/ansible/experiment.yaml create mode 100644 charts/generic/container-kill/ansible/rbac.yaml create mode 100644 charts/generic/disk-fill/ansible/engine.yaml create mode 100644 charts/generic/disk-fill/ansible/experiment.yaml create mode 100644 charts/generic/disk-fill/ansible/rbac.yaml create mode 100644 charts/generic/kubelet-service-kill/ansible/engine.yaml create mode 100644 charts/generic/kubelet-service-kill/ansible/experiment.yaml create mode 100644 charts/generic/kubelet-service-kill/ansible/rbac.yaml create mode 100644 charts/generic/node-cpu-hog/ansible/engine.yaml create mode 100644 charts/generic/node-cpu-hog/ansible/experiment.yaml create mode 100644 charts/generic/node-cpu-hog/ansible/rbac.yaml create mode 100644 charts/generic/node-drain/ansible/engine.yaml create mode 100644 charts/generic/node-drain/ansible/experiment.yaml create mode 100644 charts/generic/node-drain/ansible/rbac.yaml create mode 100644 charts/generic/node-memory-hog/ansible/engine.yaml create mode 100644 charts/generic/node-memory-hog/ansible/experiment.yaml create mode 100644 charts/generic/node-memory-hog/ansible/rbac.yaml create mode 100644 charts/generic/pod-cpu-hog/ansible/engine.yaml create mode 100644 
charts/generic/pod-cpu-hog/ansible/experiment.yaml create mode 100644 charts/generic/pod-cpu-hog/ansible/rbac.yaml create mode 100644 charts/generic/pod-delete/ansible/engine.yaml create mode 100644 charts/generic/pod-delete/ansible/experiment.yaml create mode 100644 charts/generic/pod-delete/ansible/powerfulseal_rbac.yaml create mode 100644 charts/generic/pod-delete/ansible/rbac.yaml create mode 100644 charts/generic/pod-memory-hog/ansible/engine.yaml create mode 100644 charts/generic/pod-memory-hog/ansible/experiment.yaml create mode 100644 charts/generic/pod-memory-hog/ansible/rbac.yaml create mode 100644 charts/generic/pod-network-corruption/ansible/engine.yaml create mode 100644 charts/generic/pod-network-corruption/ansible/experiment.yaml create mode 100644 charts/generic/pod-network-corruption/ansible/rbac.yaml create mode 100644 charts/generic/pod-network-latency/ansible/engine.yaml create mode 100644 charts/generic/pod-network-latency/ansible/experiment.yaml create mode 100644 charts/generic/pod-network-latency/ansible/rbac.yaml create mode 100644 charts/generic/pod-network-loss/ansible/engine.yaml create mode 100644 charts/generic/pod-network-loss/ansible/experiment.yaml create mode 100644 charts/generic/pod-network-loss/ansible/rbac.yaml diff --git a/charts/generic/container-kill/ansible/engine.yaml b/charts/generic/container-kill/ansible/engine.yaml new file mode 100644 index 0000000..134a30e --- /dev/null +++ b/charts/generic/container-kill/ansible/engine.yaml @@ -0,0 +1,37 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos + namespace: default +spec: + # It can be true/false + annotationCheck: 'true' + # It can be active/stop + engineState: 'active' + #ex. 
values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: container-kill-sa + monitoring: false + # It can be delete/retain + jobCleanUpPolicy: 'delete' + experiments: + - name: container-kill + spec: + components: + env: + # specify the name of the container to be killed + - name: TARGET_CONTAINER + value: 'nginx' + + # provide the chaos interval + - name: CHAOS_INTERVAL + value: '10' + + # provide the total chaos duration + - name: TOTAL_CHAOS_DURATION + value: '20' + \ No newline at end of file diff --git a/charts/generic/container-kill/ansible/experiment.yaml b/charts/generic/container-kill/ansible/experiment.yaml new file mode 100644 index 0000000..41a2e0a --- /dev/null +++ b/charts/generic/container-kill/ansible/experiment.yaml @@ -0,0 +1,76 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: "Kills a container belonging to an application pod \n" +kind: ChaosExperiment +metadata: + name: container-kill + version: 0.1.21 +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "apps" + - "batch" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "events" + - "pods/exec" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "update" + - "patch" + - "delete" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/container_kill/container_kill_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: TARGET_CONTAINER + value: '' + + # Period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + # It supports pumba and containerd + - name: LIB + value: 'pumba' + + # provide the chaos interval + - name: CHAOS_INTERVAL + value: '10' + + # provide the container 
runtime path for containerd + # applicable only for containerd runtime + - name: CONTAINER_PATH + value: '/run/containerd/containerd.sock' + + # provide the total chaos duration + - name: TOTAL_CHAOS_DURATION + value: '20' + + # LIB_IMAGE can be - gaiaadm/pumba:0.6.5, litmuschaos/container-kill-helper:latest + # For pumba image use: gaiaadm/pumba:0.6.5 + # For containerd image use: litmuschaos/container-kill-helper:latest + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' + + labels: + name: container-kill diff --git a/charts/generic/container-kill/ansible/rbac.yaml b/charts/generic/container-kill/ansible/rbac.yaml new file mode 100644 index 0000000..45f218b --- /dev/null +++ b/charts/generic/container-kill/ansible/rbac.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: container-kill-sa + namespace: default + labels: + name: container-kill-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: container-kill-sa + namespace: default + labels: + name: container-kill-sa +rules: +- apiGroups: ["","litmuschaos.io","batch","apps"] + resources: ["pods","jobs","pods/exec","pods/log","events","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: container-kill-sa + namespace: default + labels: + name: container-kill-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: container-kill-sa +subjects: +- kind: ServiceAccount + name: container-kill-sa + namespace: default + diff --git a/charts/generic/disk-fill/ansible/engine.yaml b/charts/generic/disk-fill/ansible/engine.yaml new file mode 100644 index 0000000..1cf4aa3 --- /dev/null +++ b/charts/generic/disk-fill/ansible/engine.yaml @@ -0,0 +1,32 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos + namespace: default +spec: + # It can be true/false + 
annotationCheck: 'false' + # It can be active/stop + engineState: 'active' + #ex. values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: disk-fill-sa + monitoring: false + # It can be delete/retain + jobCleanUpPolicy: 'delete' + experiments: + - name: disk-fill + spec: + components: + env: + # specify the fill percentage according to the disk pressure required + - name: FILL_PERCENTAGE + value: '80' + + - name: TARGET_CONTAINER + value: 'nginx' + \ No newline at end of file diff --git a/charts/generic/disk-fill/ansible/experiment.yaml b/charts/generic/disk-fill/ansible/experiment.yaml new file mode 100644 index 0000000..eb0edac --- /dev/null +++ b/charts/generic/disk-fill/ansible/experiment.yaml @@ -0,0 +1,70 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Fillup Ephemeral Storage of a Resource +kind: ChaosExperiment +metadata: + name: disk-fill + version: 0.1.13 +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "apps" + - "batch" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/exec" + - "pods/log" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/disk_fill/disk_fill_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: TARGET_CONTAINER + value: '' + + - name: FILL_PERCENTAGE + value: '80' + + - name: TOTAL_CHAOS_DURATION + value: '60' + + # Period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + # Provide the LIB here + # Only litmus supported + - name: LIB + value: 'litmus' + + # Provide the container runtime path + # Default set to docker 
+ - name: CONTAINER_PATH + value: '/var/lib/docker/containers' + + labels: + name: disk-fill diff --git a/charts/generic/disk-fill/ansible/rbac.yaml b/charts/generic/disk-fill/ansible/rbac.yaml new file mode 100644 index 0000000..b5253af --- /dev/null +++ b/charts/generic/disk-fill/ansible/rbac.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: disk-fill-sa + namespace: default + labels: + name: disk-fill-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: disk-fill-sa + labels: + name: disk-fill-sa +rules: +- apiGroups: ["","apps","litmuschaos.io","batch"] + resources: ["pods","jobs","pods/exec","events","pods/log","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: disk-fill-sa + labels: + name: disk-fill-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: disk-fill-sa +subjects: +- kind: ServiceAccount + name: disk-fill-sa + namespace: default \ No newline at end of file diff --git a/charts/generic/kubelet-service-kill/ansible/engine.yaml b/charts/generic/kubelet-service-kill/ansible/engine.yaml new file mode 100644 index 0000000..2cfc807 --- /dev/null +++ b/charts/generic/kubelet-service-kill/ansible/engine.yaml @@ -0,0 +1,27 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos + namespace: default +spec: + # It can be true/false + annotationCheck: 'false' + # It can be active/stop + engineState: 'active' + #ex. 
values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: kubelet-service-kill-sa + monitoring: false + # It can be delete/retain + jobCleanUpPolicy: 'delete' + experiments: + - name: kubelet-service-kill + spec: + components: + env: + - name: TOTAL_CHAOS_DURATION + value: '90' # in seconds diff --git a/charts/generic/kubelet-service-kill/ansible/experiment.yaml b/charts/generic/kubelet-service-kill/ansible/experiment.yaml new file mode 100644 index 0000000..824007f --- /dev/null +++ b/charts/generic/kubelet-service-kill/ansible/experiment.yaml @@ -0,0 +1,61 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Kills the kubelet service on the application node to check the resiliency. +kind: ChaosExperiment +metadata: + name: kubelet-service-kill + version: 0.1.2 +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "batch" + - "apps" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/kubelet_service_kill/kubelet_service_kill_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: TOTAL_CHAOS_DURATION + value: '90' # in seconds + + # Period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + - name: LIB + value: 'litmus' + labels: + name: kubelet-service-kill diff --git a/charts/generic/kubelet-service-kill/ansible/rbac.yaml b/charts/generic/kubelet-service-kill/ansible/rbac.yaml new file mode 100644 index 0000000..5c6139f --- 
/dev/null +++ b/charts/generic/kubelet-service-kill/ansible/rbac.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kubelet-service-kill + namespace: default + labels: + name: kubelet-service-kill +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: kubelet-service-kill + labels: + name: kubelet-service-kill +rules: +- apiGroups: ["","litmuschaos.io","batch","apps"] + resources: ["pods","jobs","pods/log","events","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get","list"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: kubelet-service-kill + labels: + name: kubelet-service-kill +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubelet-service-kill +subjects: +- kind: ServiceAccount + name: kubelet-service-kill + namespace: default diff --git a/charts/generic/node-cpu-hog/ansible/engine.yaml b/charts/generic/node-cpu-hog/ansible/engine.yaml new file mode 100644 index 0000000..532f6cf --- /dev/null +++ b/charts/generic/node-cpu-hog/ansible/engine.yaml @@ -0,0 +1,31 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos + namespace: default +spec: + # It can be true/false + annotationCheck: 'false' + # It can be active/stop + engineState: 'active' + #ex. 
values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: node-cpu-hog-sa + monitoring: false + # It can be delete/retain + jobCleanUpPolicy: 'delete' + experiments: + - name: node-cpu-hog + spec: + components: + env: + # set chaos duration (in sec) as desired + - name: TOTAL_CHAOS_DURATION + value: '60' + + - name: NODE_CPU_CORE + value: '' \ No newline at end of file diff --git a/charts/generic/node-cpu-hog/ansible/experiment.yaml b/charts/generic/node-cpu-hog/ansible/experiment.yaml new file mode 100644 index 0000000..7dc1456 --- /dev/null +++ b/charts/generic/node-cpu-hog/ansible/experiment.yaml @@ -0,0 +1,74 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Give a cpu spike on a node belonging to a deployment +kind: ChaosExperiment +metadata: + name: node-cpu-hog + version: 0.1.19 +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "batch" + - "apps" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/node_cpu_hog/node_cpu_hog_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: TOTAL_CHAOS_DURATION + value: '60' + + # Period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + ## ENTER THE NUMBER OF CORES OF CPU FOR CPU HOGGING + ## OPTIONAL VALUE IN CASE OF EMPTY VALUE IT WILL TAKE NODE CPU CAPACITY + - name: NODE_CPU_CORE + value: '' + + # PROVIDE THE LIB HERE + # ONLY LITMUS SUPPORTED + - name: LIB + value: 'litmus' 
+ + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/stress-ng:latest' + + labels: + name: node-cpu-hog diff --git a/charts/generic/node-cpu-hog/ansible/rbac.yaml b/charts/generic/node-cpu-hog/ansible/rbac.yaml new file mode 100644 index 0000000..19b2bb1 --- /dev/null +++ b/charts/generic/node-cpu-hog/ansible/rbac.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-cpu-hog-sa + namespace: default + labels: + name: node-cpu-hog-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: node-cpu-hog-sa + labels: + name: node-cpu-hog-sa +rules: +- apiGroups: ["","litmuschaos.io","batch","apps"] + resources: ["pods","jobs","events","chaosengines","pods/log","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get","list"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: node-cpu-hog-sa + labels: + name: node-cpu-hog-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: node-cpu-hog-sa +subjects: +- kind: ServiceAccount + name: node-cpu-hog-sa + namespace: default diff --git a/charts/generic/node-cpu-hog/engine.yaml b/charts/generic/node-cpu-hog/engine.yaml index 532f6cf..ac0e1a5 100644 --- a/charts/generic/node-cpu-hog/engine.yaml +++ b/charts/generic/node-cpu-hog/engine.yaml @@ -28,4 +28,4 @@ spec: value: '60' - name: NODE_CPU_CORE - value: '' \ No newline at end of file + value: '' diff --git a/charts/generic/node-cpu-hog/node-cpu-hog.chartserviceversion.yaml b/charts/generic/node-cpu-hog/node-cpu-hog.chartserviceversion.yaml index d66452a..b9f1f03 100644 --- a/charts/generic/node-cpu-hog/node-cpu-hog.chartserviceversion.yaml +++ b/charts/generic/node-cpu-hog/node-cpu-hog.chartserviceversion.yaml @@ -14,7 +14,6 @@ spec: Node CPU hog contains chaos to disrupt the state of Kubernetes resources. 
Experiments can inject a CPU spike on a node where the application pod is scheduled. - CPU hog on a particular node where the application deployment is available. - After test, the recovery should be manual for the application pod and node in case they are not in an appropriate state. - keywords: - Kubernetes - CPU diff --git a/charts/generic/node-drain/ansible/engine.yaml b/charts/generic/node-drain/ansible/engine.yaml new file mode 100644 index 0000000..85adde1 --- /dev/null +++ b/charts/generic/node-drain/ansible/engine.yaml @@ -0,0 +1,28 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos + namespace: default +spec: + # It can be true/false + annotationCheck: 'false' + # It can be active/stop + engineState: 'active' + #ex. values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: node-drain-sa + monitoring: false + # It can be delete/retain + jobCleanUpPolicy: 'delete' + experiments: + - name: node-drain + spec: + components: + env: + # set node name + - name: APP_NODE + value: 'node-1' \ No newline at end of file diff --git a/charts/generic/node-drain/ansible/experiment.yaml b/charts/generic/node-drain/ansible/experiment.yaml new file mode 100644 index 0000000..e36d752 --- /dev/null +++ b/charts/generic/node-drain/ansible/experiment.yaml @@ -0,0 +1,77 @@ +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Drain the node where application pod is scheduled +kind: ChaosExperiment +metadata: + name: node-drain + version: 0.1.12 +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "batch" + - "extensions" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "events" + - "pods/log" + - "daemonsets" + - "pods/eviction" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - apiGroups: + 
- "" + resources: + - "nodes" + verbs: + - "get" + - "list" + - "patch" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/node_drain/node_drain_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: LIVENESS_APP_LABEL + value: '' + + - name: APP_NODE + value: '' + + - name: TOTAL_CHAOS_DURATION + value: '60' + + # Provide the LIB here + # Only litmus supported + - name: LIB + value: 'litmus' + + # Period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + - name: LIVENESS_APP_NAMESPACE + value: '' + + labels: + name: node-drain diff --git a/charts/generic/node-drain/ansible/rbac.yaml b/charts/generic/node-drain/ansible/rbac.yaml new file mode 100644 index 0000000..7851d25 --- /dev/null +++ b/charts/generic/node-drain/ansible/rbac.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-drain-sa + namespace: default + labels: + name: node-drain-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: node-drain-sa + labels: + name: node-drain-sa +rules: +- apiGroups: ["","litmuschaos.io","batch","extensions"] + resources: ["pods","jobs","events","chaosengines","pods/log","daemonsets","pods/eviction","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["patch","get","list"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: node-drain-sa + labels: + name: node-drain-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: node-drain-sa +subjects: +- kind: ServiceAccount + name: node-drain-sa + namespace: default \ No newline at end of file diff --git a/charts/generic/node-memory-hog/ansible/engine.yaml 
b/charts/generic/node-memory-hog/ansible/engine.yaml new file mode 100644 index 0000000..32ec9ec --- /dev/null +++ b/charts/generic/node-memory-hog/ansible/engine.yaml @@ -0,0 +1,33 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos + namespace: default +spec: + # It can be true/false + annotationCheck: 'false' + # It can be active/stop + engineState: 'active' + #ex. values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: node-memory-hog-sa + monitoring: false + # It can be delete/retain + jobCleanUpPolicy: 'delete' + experiments: + - name: node-memory-hog + spec: + components: + env: + # set chaos duration (in sec) as desired + - name: TOTAL_CHAOS_DURATION + value: '120' + + ## specify the size as percent of total available memory (in percentage %) + ## default value 90% + - name: MEMORY_PERCENTAGE + value: '90' \ No newline at end of file diff --git a/charts/generic/node-memory-hog/ansible/experiment.yaml b/charts/generic/node-memory-hog/ansible/experiment.yaml new file mode 100644 index 0000000..ddc84a9 --- /dev/null +++ b/charts/generic/node-memory-hog/ansible/experiment.yaml @@ -0,0 +1,74 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Give a memory hog on a node belonging to a deployment +kind: ChaosExperiment +metadata: + name: node-memory-hog + version: 0.1.9 +spec: + definition: + scope: Cluster + permissions: + - apiGroups: + - "" + - "batch" + - "apps" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook 
./experiments/generic/node_memory_hog/node_memory_hog_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: TOTAL_CHAOS_DURATION + value: '120' + + ## specify the size as percent of total available memory (in percentage %) + ## Default value "90%" + - name: MEMORY_PERCENTAGE + value: '90' + + # Period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + # Provide the LIB here + # Only litmus supported + - name: LIB + value: 'litmus' + + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/stress-ng:latest' + + labels: + name: node-memory-hog diff --git a/charts/generic/node-memory-hog/ansible/rbac.yaml b/charts/generic/node-memory-hog/ansible/rbac.yaml new file mode 100644 index 0000000..a1f08c5 --- /dev/null +++ b/charts/generic/node-memory-hog/ansible/rbac.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-memory-hog-sa + namespace: default + labels: + name: node-memory-hog-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: node-memory-hog-sa + labels: + name: node-memory-hog-sa +rules: +- apiGroups: ["","litmuschaos.io","batch","apps"] + resources: ["pods","jobs","pods/log","events","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get","list"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: node-memory-hog-sa + labels: + name: node-memory-hog-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: node-memory-hog-sa +subjects: +- kind: ServiceAccount + name: node-memory-hog-sa + namespace: default diff --git a/charts/generic/pod-cpu-hog/ansible/engine.yaml b/charts/generic/pod-cpu-hog/ansible/engine.yaml new file mode 100644 index 0000000..b24244f --- /dev/null +++ 
b/charts/generic/pod-cpu-hog/ansible/engine.yaml @@ -0,0 +1,38 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos + namespace: default +spec: + # It can be true/false + annotationCheck: 'true' + # It can be active/stop + engineState: 'active' + #ex. values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: pod-cpu-hog-sa + monitoring: false + # It can be delete/retain + jobCleanUpPolicy: 'delete' + experiments: + - name: pod-cpu-hog + spec: + components: + env: + # Provide name of target container + # where chaos has to be injected + - name: TARGET_CONTAINER + value: 'nginx' + + #number of cpu cores to be consumed + #verify the resources the app has been launched with + - name: CPU_CORES + value: '1' + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + \ No newline at end of file diff --git a/charts/generic/pod-cpu-hog/ansible/experiment.yaml b/charts/generic/pod-cpu-hog/ansible/experiment.yaml new file mode 100644 index 0000000..a1b906d --- /dev/null +++ b/charts/generic/pod-cpu-hog/ansible/experiment.yaml @@ -0,0 +1,62 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Injects cpu consumption on pods belonging to an app deployment +kind: ChaosExperiment +metadata: + name: pod-cpu-hog + version: 0.1.11 +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "batch" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/pod_cpu_hog/pod_cpu_hog_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' 
+ + - name: TARGET_CONTAINER + value: '' + + - name: CPU_CORES + value: '1' + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + # Period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + - name: LIB + value: 'litmus' + + - name: LIB_IMAGE + value: 'litmuschaos/app-cpu-stress:latest' + labels: + name: pod-cpu-hog diff --git a/charts/generic/pod-cpu-hog/ansible/rbac.yaml b/charts/generic/pod-cpu-hog/ansible/rbac.yaml new file mode 100644 index 0000000..bf8f8c3 --- /dev/null +++ b/charts/generic/pod-cpu-hog/ansible/rbac.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-cpu-hog-sa + namespace: default + labels: + name: pod-cpu-hog-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: pod-cpu-hog-sa + namespace: default + labels: + name: pod-cpu-hog-sa +rules: +- apiGroups: ["","litmuschaos.io","batch"] + resources: ["pods","jobs","events","pods/log","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: pod-cpu-hog-sa + namespace: default + labels: + name: pod-cpu-hog-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-cpu-hog-sa +subjects: +- kind: ServiceAccount + name: pod-cpu-hog-sa + namespace: default diff --git a/charts/generic/pod-delete/ansible/engine.yaml b/charts/generic/pod-delete/ansible/engine.yaml new file mode 100644 index 0000000..ebf5e3e --- /dev/null +++ b/charts/generic/pod-delete/ansible/engine.yaml @@ -0,0 +1,36 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos + namespace: default +spec: + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + # It can be true/false + annotationCheck: 'true' + # It can be active/stop + engineState: 'active' + #ex. 
values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + chaosServiceAccount: pod-delete-sa + monitoring: false + # It can be delete/retain + jobCleanUpPolicy: 'delete' + experiments: + - name: pod-delete + spec: + components: + env: + # set chaos duration (in sec) as desired + - name: TOTAL_CHAOS_DURATION + value: '30' + + # set chaos interval (in sec) as desired + - name: CHAOS_INTERVAL + value: '10' + + # pod failures without '--force' & default terminationGracePeriodSeconds + - name: FORCE + value: 'false' diff --git a/charts/generic/pod-delete/ansible/experiment.yaml b/charts/generic/pod-delete/ansible/experiment.yaml new file mode 100644 index 0000000..4595584 --- /dev/null +++ b/charts/generic/pod-delete/ansible/experiment.yaml @@ -0,0 +1,77 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Deletes a pod belonging to a deployment/statefulset/daemonset +kind: ChaosExperiment +metadata: + name: pod-delete + version: 0.1.17 +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "apps" + - "batch" + - "litmuschaos.io" + resources: + - "deployments" + - "jobs" + - "pods" + - "pods/log" + - "events" + - "configmaps" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/pod_delete/pod_delete_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: TOTAL_CHAOS_DURATION + value: '15' + + # Period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + # provide the kill count + - name: KILL_COUNT + value: '' + + - name: FORCE + value: 'true' + + - name: CHAOS_INTERVAL + value: '5' + + - name: LIB_IMAGE + value: 
'litmuschaos/pod-delete-helper:latest' + + - name: LIB + value: 'litmus' + labels: + name: pod-delete diff --git a/charts/generic/pod-delete/ansible/powerfulseal_rbac.yaml b/charts/generic/pod-delete/ansible/powerfulseal_rbac.yaml new file mode 100644 index 0000000..911f211 --- /dev/null +++ b/charts/generic/pod-delete/ansible/powerfulseal_rbac.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-delete-sa + namespace: default + labels: + name: pod-delete-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: pod-delete-sa + labels: + name: pod-delete-sa +rules: +- apiGroups: ["","litmuschaos.io","batch","apps"] + resources: ["pods","deployments","pods/log","events","jobs","configmaps","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get","list"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: pod-delete-sa + labels: + name: pod-delete-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pod-delete-sa +subjects: +- kind: ServiceAccount + name: pod-delete-sa + namespace: default + diff --git a/charts/generic/pod-delete/ansible/rbac.yaml b/charts/generic/pod-delete/ansible/rbac.yaml new file mode 100644 index 0000000..e9a820b --- /dev/null +++ b/charts/generic/pod-delete/ansible/rbac.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-delete-sa + namespace: default + labels: + name: pod-delete-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: pod-delete-sa + namespace: default + labels: + name: pod-delete-sa +rules: +- apiGroups: ["","litmuschaos.io","batch","apps"] + resources: ["pods","deployments","pods/log","events","jobs","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +--- 
+apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: pod-delete-sa + namespace: default + labels: + name: pod-delete-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-delete-sa +subjects: +- kind: ServiceAccount + name: pod-delete-sa + namespace: default + diff --git a/charts/generic/pod-memory-hog/ansible/engine.yaml b/charts/generic/pod-memory-hog/ansible/engine.yaml new file mode 100644 index 0000000..69731ca --- /dev/null +++ b/charts/generic/pod-memory-hog/ansible/engine.yaml @@ -0,0 +1,38 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos + namespace: default +spec: + # It can be true/false + annotationCheck: 'true' + # It can be active/stop + engineState: 'active' + #ex. values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: pod-memory-hog-sa + monitoring: false + # It can be delete/retain + jobCleanUpPolicy: 'delete' + experiments: + - name: pod-memory-hog + spec: + components: + env: + # Provide name of target container + # where chaos has to be injected + - name: TARGET_CONTAINER + value: 'nginx' + + # Enter the amount of memory in megabytes to be consumed by the application pod + # default: 500 (Megabytes) + - name: MEMORY_CONSUMPTION + value: '500' + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + diff --git a/charts/generic/pod-memory-hog/ansible/experiment.yaml b/charts/generic/pod-memory-hog/ansible/experiment.yaml new file mode 100644 index 0000000..e8068ca --- /dev/null +++ b/charts/generic/pod-memory-hog/ansible/experiment.yaml @@ -0,0 +1,66 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Injects memory consumption on pods belonging to an app deployment +kind: ChaosExperiment +metadata: + name: pod-memory-hog + version: 0.1.3 +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "batch" 
+ - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/pod_memory_hog/pod_memory_hog_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + # Provide name of target container + # where chaos has to be injected + - name: TARGET_CONTAINER + value: '' + + # Enter the amount of memory in megabytes to be consumed by the application pod + # default: 500 (Megabytes) + - name: MEMORY_CONSUMPTION + value: '500' + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + - name: LIB + value: 'litmus' + + - name: LIB_IMAGE + value: 'litmuschaos/app-memory-stress:latest' + labels: + name: pod-memory-hog diff --git a/charts/generic/pod-memory-hog/ansible/rbac.yaml b/charts/generic/pod-memory-hog/ansible/rbac.yaml new file mode 100644 index 0000000..4f63b60 --- /dev/null +++ b/charts/generic/pod-memory-hog/ansible/rbac.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-memory-hog-sa + namespace: default + labels: + name: pod-memory-hog-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: pod-memory-hog-sa + namespace: default + labels: + name: pod-memory-hog-sa +rules: +- apiGroups: ["","litmuschaos.io","batch"] + resources: ["pods","jobs","events","pods/log","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: pod-memory-hog-sa + namespace: default + labels: + name: pod-memory-hog-sa 
+roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-memory-hog-sa +subjects: +- kind: ServiceAccount + name: pod-memory-hog-sa + namespace: default diff --git a/charts/generic/pod-network-corruption/ansible/engine.yaml b/charts/generic/pod-network-corruption/ansible/engine.yaml new file mode 100644 index 0000000..3d1a9c3 --- /dev/null +++ b/charts/generic/pod-network-corruption/ansible/engine.yaml @@ -0,0 +1,36 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-network-chaos + namespace: default +spec: + # It can be delete/retain + jobCleanUpPolicy: 'delete' + # It can be true/false + annotationCheck: 'true' + # It can be active/stop + engineState: 'active' + #ex. values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + monitoring: false + appinfo: + appns: 'default' + # FYI, To see app label, apply kubectl get pods --show-labels + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: pod-network-corruption-sa + experiments: + - name: pod-network-corruption + spec: + components: + env: + #Container name where chaos has to be injected + - name: TARGET_CONTAINER + value: 'nginx' + + #Network interface inside target container + - name: NETWORK_INTERFACE + value: 'eth0' + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds \ No newline at end of file diff --git a/charts/generic/pod-network-corruption/ansible/experiment.yaml b/charts/generic/pod-network-corruption/ansible/experiment.yaml new file mode 100644 index 0000000..74611e6 --- /dev/null +++ b/charts/generic/pod-network-corruption/ansible/experiment.yaml @@ -0,0 +1,66 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Inject network packet corruption into application pod +kind: ChaosExperiment +metadata: + name: pod-network-corruption + version: 0.1.10 +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "batch" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - 
"events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "delete" + - "list" + - "patch" + - "update" + - "get" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/pod_network_corruption/pod_network_corruption_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: TARGET_CONTAINER + value: '' + + # provide lib image + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' + + - name: NETWORK_INTERFACE + value: 'eth0' + + - name: NETWORK_PACKET_CORRUPTION_PERCENTAGE + value: '100' #in PERCENTAGE + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + # Time period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + - name: LIB + value: 'pumba' + labels: + name: pod-network-corruption diff --git a/charts/generic/pod-network-corruption/ansible/rbac.yaml b/charts/generic/pod-network-corruption/ansible/rbac.yaml new file mode 100644 index 0000000..2bf6f83 --- /dev/null +++ b/charts/generic/pod-network-corruption/ansible/rbac.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-network-corruption-sa + namespace: default + labels: + name: pod-network-corruption-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: pod-network-corruption-sa + namespace: default + labels: + name: pod-network-corruption-sa +rules: +- apiGroups: ["","litmuschaos.io","batch"] + resources: ["pods","jobs","events","pods/log","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: pod-network-corruption-sa + namespace: default + labels: + name: pod-network-corruption-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: 
pod-network-corruption-sa +subjects: +- kind: ServiceAccount + name: pod-network-corruption-sa + namespace: default diff --git a/charts/generic/pod-network-latency/ansible/engine.yaml b/charts/generic/pod-network-latency/ansible/engine.yaml new file mode 100644 index 0000000..0d8123e --- /dev/null +++ b/charts/generic/pod-network-latency/ansible/engine.yaml @@ -0,0 +1,42 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-network-chaos + namespace: default +spec: + # It can be delete/retain + jobCleanUpPolicy: 'delete' + # It can be true/false + annotationCheck: 'true' + # It can be active/stop + engineState: 'active' + #ex. values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + monitoring: false + appinfo: + appns: 'default' + # FYI, To see app label, apply kubectl get pods --show-labels + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: pod-network-latency-sa + experiments: + - name: pod-network-latency + spec: + components: + env: + #Container name where chaos has to be injected + - name: TARGET_CONTAINER + value: 'nginx' + + #Network interface inside target container + - name: NETWORK_INTERFACE + value: 'eth0' + + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' + + - name: NETWORK_LATENCY + value: '60000' + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds diff --git a/charts/generic/pod-network-latency/ansible/experiment.yaml b/charts/generic/pod-network-latency/ansible/experiment.yaml new file mode 100644 index 0000000..d5797cd --- /dev/null +++ b/charts/generic/pod-network-latency/ansible/experiment.yaml @@ -0,0 +1,66 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Injects network latency on pods belonging to an app deployment +kind: ChaosExperiment +metadata: + name: pod-network-latency + version: 0.1.15 +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "batch" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - 
"events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/pod_network_latency/pod_network_latency_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: TARGET_CONTAINER + value: '' + + - name: NETWORK_INTERFACE + value: 'eth0' + + # provide lib image + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' + + - name: NETWORK_LATENCY + value: '60000' #in ms + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + # Time period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + - name: LIB + value: 'pumba' + labels: + name: pod-network-latency diff --git a/charts/generic/pod-network-latency/ansible/rbac.yaml b/charts/generic/pod-network-latency/ansible/rbac.yaml new file mode 100644 index 0000000..ef6be13 --- /dev/null +++ b/charts/generic/pod-network-latency/ansible/rbac.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-network-latency-sa + namespace: default + labels: + name: pod-network-latency-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: pod-network-latency-sa + namespace: default + labels: + name: pod-network-latency-sa +rules: +- apiGroups: ["","litmuschaos.io","batch"] + resources: ["pods","jobs","pods/log","events","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: pod-network-latency-sa + namespace: default + labels: + name: pod-network-latency-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-network-latency-sa +subjects: +- kind: ServiceAccount + name: 
pod-network-latency-sa + namespace: default diff --git a/charts/generic/pod-network-loss/ansible/engine.yaml b/charts/generic/pod-network-loss/ansible/engine.yaml new file mode 100644 index 0000000..cb0cc37 --- /dev/null +++ b/charts/generic/pod-network-loss/ansible/engine.yaml @@ -0,0 +1,44 @@ +# chaosengine.yaml +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-network-chaos + namespace: default +spec: + # It can be delete/retain + jobCleanUpPolicy: 'delete' + # It can be true/false + annotationCheck: 'true' + # It can be active/stop + engineState: 'active' + #ex. values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + monitoring: false + appinfo: + appns: 'default' + # FYI, To see app label, apply kubectl get pods --show-labels + applabel: 'app=nginx' + appkind: 'deployment' + chaosServiceAccount: pod-network-loss-sa + experiments: + - name: pod-network-loss + spec: + components: + env: + #Container name where chaos has to be injected + - name: TARGET_CONTAINER + value: 'nginx' + + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' + + #Network interface inside target container + - name: NETWORK_INTERFACE + value: 'eth0' + + - name: NETWORK_PACKET_LOSS_PERCENTAGE + value: '100' + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + \ No newline at end of file diff --git a/charts/generic/pod-network-loss/ansible/experiment.yaml b/charts/generic/pod-network-loss/ansible/experiment.yaml new file mode 100644 index 0000000..12a78ab --- /dev/null +++ b/charts/generic/pod-network-loss/ansible/experiment.yaml @@ -0,0 +1,66 @@ +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Injects network packet loss on pods belonging to an app deployment +kind: ChaosExperiment +metadata: + name: pod-network-loss + version: 0.1.15 +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "batch" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "events" + - "chaosengines" + - 
"chaosexperiments" + - "chaosresults" + verbs: + - "get" + - "list" + - "patch" + - "create" + - "update" + - "delete" + image: "litmuschaos/ansible-runner:1.6.0" + imagePullPolicy: Always + args: + - -c + - ansible-playbook ./experiments/generic/pod_network_loss/pod_network_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + command: + - /bin/bash + env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' + + - name: TARGET_CONTAINER + value: '' + + # provide lib image + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' + + - name: NETWORK_INTERFACE + value: 'eth0' + + - name: NETWORK_PACKET_LOSS_PERCENTAGE + value: '100' #in PERCENTAGE + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + # Time period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + - name: LIB + value: 'pumba' + labels: + name: pod-network-loss diff --git a/charts/generic/pod-network-loss/ansible/rbac.yaml b/charts/generic/pod-network-loss/ansible/rbac.yaml new file mode 100644 index 0000000..4be813a --- /dev/null +++ b/charts/generic/pod-network-loss/ansible/rbac.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-network-loss-sa + namespace: default + labels: + name: pod-network-loss-sa +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: pod-network-loss-sa + namespace: default + labels: + name: pod-network-loss-sa +rules: +- apiGroups: ["","litmuschaos.io","batch"] + resources: ["pods","jobs","events","pods/log","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: pod-network-loss-sa + namespace: default + labels: + name: pod-network-loss-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-network-loss-sa +subjects: +- kind: ServiceAccount + name: pod-network-loss-sa + namespace: default